repo_name (stringlengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
delos/microhalo-models | [
"aaebc1a1fffbff8c6fd561d9936229e637926f5b"
] | [
"examples/annihilation-suppression/profiles.py"
] | [
"import numpy as np\r\nfrom scipy.special import spence\r\n\r\nlog2 = np.log(2)\r\nlog4 = np.log(4)\r\n\r\nsupported_params = [\r\n [1,3,1], # NFW\r\n [1,3,1.5], # Moore\r\n [2,3,0], # cored\r\n]\r\n\r\ndef density_norm(params):\r\n if params == [1,3,1]:\r\n return 1.\r\n elif params == [1,3,1.5]:\r\n return .5\r\n elif params == [2,3,0]:\r\n return 1.\r\ndef density(R,params):\r\n if params == [1,3,1]:\r\n return 1./(R*(1+R)**2)\r\n elif params == [1,3,1.5]:\r\n return .5/(R**1.5*(1+R)**1.5)\r\n elif params == [2,3,0]:\r\n return 1./(1+R**2)**1.5\r\ndef mass(R,params):\r\n if params == [1,3,1]:\r\n return np.where(R<.1,\r\n R**2/2. - 2.*R**3/3. + 3.*R**4/4 - 4*R**5/5 + 5.*R**6/6,\r\n np.log(1+R)-R/(1.+R)\r\n )\r\n elif params == [1,3,1.5]:\r\n return np.where(R<.1,\r\n R**1.5/3. - 3.*R**2.5/10 + 15.*R**3.5/56 - 35*R**4.5/144 + 315.*R**5.5/1408,\r\n np.arcsinh(R**.5)-(R/(1.+R))**.5\r\n )\r\n elif params == [2,3,0]:\r\n return np.where(R<.1,\r\n R**3/3. - 3.*R**5/10 + 15.*R**7/56,\r\n np.arcsinh(R)-R/(1.+R**2)**.5\r\n )\r\ndef integrated_density_over_r(R,params):\r\n if params == [1,3,1]:\r\n return R/(1.+R)\r\n elif params == [1,3,1.5]:\r\n return np.sqrt(R/(1.+R))\r\n elif params == [2,3,0]:\r\n return 1.-1./np.sqrt(1.+R**2)\r\ndef density_mean(R,params):\r\n if params == [2,3,0]:\r\n return np.where(R<.1,\r\n 1.-9.*R**2/10+45.*R**4/56-35.*R**6/48+945.*R**8/1408,\r\n np.divide(mass(R,params)*3.,R**3,where=R>0)\r\n )\r\n else:\r\n return mass(R,params)*3./R**3\r\ndef r3_over_mass(R,params):\r\n if params == [1,3,1]:\r\n return np.where(R<.1,\r\n 2*R + 8*R**2/3. + 5*R**3/9. - 8*R**4/135. + 17*R**5/810. - 86*R**6/8505.,\r\n R**3/mass(R,params)\r\n )\r\n elif params == [1,3,1.5]:\r\n return np.where(R<.1,\r\n 3*R**1.5 + 27*R**2.5/10 + 27*R**3.5/1400 + 493*R**4.5/14000 - 1231029*R**5.5/43120000,\r\n R**3/mass(R,params)\r\n )\r\n elif params == [2,3,0]:\r\n return np.where(R<.1,\r\n 3 + 27*R**2/10. + 27*R**4/1400. + 493*R**6/14000. - 1231029*R**8/43120000.,\r\n R**3/mass(R,params)\r\n )\r\ndef potential(R,params):\r\n if params == [1,3,1]:\r\n return np.where(R<.1,\r\n 1.-R/2.+R**2/3.-R**3/4.+R**4/5.-R**5/6.+R**6/7.,\r\n np.divide(np.log(1+R),R,where=R>0)\r\n )\r\n elif params == [1,3,1.5]:\r\n return np.where(R<.1,\r\n 1.-2.*R**.5/3.+R**1.5/5.-3.*R**2.5/28.+5.*R**3.5/72.-35.*R**4.5/704.+63.*R**5.5/1664.,\r\n np.divide(R-(R+R**2)**.5+np.arcsinh(R**.5),R,where=R>0)\r\n )\r\n elif params == [2,3,0]:\r\n return np.where(R<.1,\r\n 1.-R**2/6.+3.*R**4/40.-5.*R**6/112.+35.*R**8/1152.,\r\n np.divide(np.arcsinh(R),R,where=R>0)\r\n )\r\ndef velocity_dispersion_radial_squared(R,params):\r\n logR = np.log(R,where=R>0)\r\n if params == [1,3,1]:\r\n return np.where(R<.1,\r\n .25*(-23+2*np.pi**2-2*logR)*R+(-59./6+np.pi**2-logR)*R**2+1./24*(-101+12*np.pi**2-12*logR)*R**3+(11*R**4)/60.-(13*R**5)/240.+(37*R**6)/1400.,\r\n np.where(R>10.,\r\n np.divide(-3./16+logR/4,R,where=R>10.) + np.divide(69./200+logR/10,R**2,where=R>10.) + np.divide(-97./1200-logR/20,R**3,where=R>10.) + np.divide(71./3675+logR/35,R**4,where=R>10.) + np.divide(-1./3136-logR/56,R**5,where=R>10.) 
+ np.divide(-1271./211680+logR/84,R**6,where=R>10.),\r\n .5*(-1+R*(-9-7*R+np.pi**2*(1+R)**2)-R*(1+R)**2*logR+np.divide((1+R)*np.log(1+R)*(1+R*(-3+(-5+R)*R)+3*R**2*(1+R)*np.log(1+R)),R,where=R>0)+6*R*(1+R)**2*spence(1+R))\r\n )\r\n )\r\n elif params == [1,3,1.5]:\r\n return np.where(R<.1,\r\n R**.5/3+1./150*(817-960*log2+120*logR)*R**1.5+(8683./1400-48*log2/5+6*logR/5)*R**2.5+(19861./25200-12*log2/5+3*logR/10)*R**3.5+(-461803./2217600+2*log2/5-logR/20)*R**4.5+(1576591./19219200-3*log2/20+3*logR/160)*R**5.5,\r\n np.where(R>10.,\r\n np.divide(-7+4*(log4+logR),32*R,where=R>10.) + 3.*np.divide(49+20*(log4+logR),1600*R**2,where=R>10.) + np.divide(-79-420*(log4+logR),19200*R**3,where=R>10.) + np.divide(-3589+3780*(log4+logR),268800*R**4,where=R>10.) + np.divide(48311-27720*(log4+logR),2867200*R**5,where=R>10.) + np.divide(-285041+120120*(log4+logR),17203200*R**6,where=R>10.),\r\n .2*(R*(1+R))**1.5*(5./(1+R)+np.divide(-2.+7*R,R**2,where=R>0)+np.divide(2*(1+2*R*(-1+4*R+8*R**2))*np.arcsinh(R**.5),np.sqrt(R**5*(1+R)),where=R>0)+4*np.log(R/(256*(1+R)**5),where=R>0))\r\n )\r\n )\r\n elif params == [2,3,0]:\r\n return np.where(R<.1,\r\n 1.5-2*log2+(25./12-3*log2)*R**2+(41./80-3*log2/4)*R**4+(-269./3360+log2/8)*R**6+(2171./80640-3*log2/64)*R**8,\r\n np.where(R>10.,\r\n np.divide(-3-12*log2+8*log4+4*logR,16*R,where=R>10.) + np.divide(5+12*log2+12*logR,96*R**3,where=R>10.) + np.divide(13-24*log2-24*logR,512*R**5,where=R>10.) + np.divide(-391+360*log2+360*logR,15360*R**7,where=R>10.),\r\n np.divide((2+6*R**2+4*R**4)*np.arcsinh(R)+R*(1+R**2)**.5*(1-2*(1+R**2)*np.log(4*(1+R**2))),2*R,where=R>0)\r\n )\r\n )\r\n \r\ndef velocity_dispersion_squared(R,params):\r\n return 3*velocity_dispersion_radial_squared(R,params)\r\n\r\ndef KE_circular(R,params):\r\n if params == [1,3,1]:\r\n return np.where(R<.1,\r\n R/4.-R**2/3.+3.*R**3/8-2.*R**4/5+5.*R**5/12-3.*R**6/7,\r\n np.divide(mass(R,params),2*R,where=R>0)\r\n )\r\n elif params == [1,3,1.5]:\r\n return np.where(R<.1,\r\n R**.5/6.-3.*R**1.5/20+15.*R**2.5/112-35.*R**3.5/288+315.*R**4.5/2816-693.*R**5.5/6656,\r\n np.divide(mass(R,params),2*R,where=R>0)\r\n )\r\n elif params == [2,3,0]:\r\n return np.where(R<.1,\r\n R**2/6.-3.*R**4/20+15.*R**6/112-35.*R**8/288,\r\n np.divide(mass(R,params),2*R,where=R>0)\r\n )\r\n\r\ndef d2density_dpotential2(R,params):\r\n if params == [1,3,1]:\r\n return np.divide(R**3*(R*(-2+4*R-R**3+R**4)-2*(-1+R+2*R**2)*np.log(1+R)),(1+R)**2*(-R+(1+R)*np.log(1+R))**3)\r\n elif params == [1,3,1.5]:\r\n return np.divide(R**3*(3-12*R+R**3-2*R**4)+3*R**2.5*np.sqrt(1+R)*(-1+4*R)*np.arcsinh(np.sqrt(R)),8*(1+R)**4*(np.sqrt(R/(1+R))-np.arcsinh(np.sqrt(R)))**3)\r\n elif params == [2,3,0]:\r\n return np.divide(R**4*(R*(-6+9*R**2-2*R**4+R**6)-3*np.sqrt(1+R**2)*(-2+3*R**2)*np.arcsinh(R)),(1+R**2)**2.5*(-R+np.sqrt(1+R**2)*np.arcsinh(R))**3)\r\n\r\ndef dPsidR(R,params):\r\n if params == [1,3,1]:\r\n return (R/(1 + R) - np.log(1 + R))/R**2\r\n elif params == [1,3,1.5]:\r\n return (np.sqrt(R/(1 + R)) - np.arcsinh(np.sqrt(R)))/(R**2)\r\n elif params == [2,3,0]:\r\n return (R/np.sqrt(1 + R**2) - np.arcsinh(R))/R**2\r\n\r\ndef d2PsidR2(R,params):\r\n if params == [1,3,1]:\r\n return (-((R*(2 + 3*R))/(1 + R)**2) + 2*np.log(1 + R))/R**3\r\n elif params == [1,3,1.5]:\r\n return -((4 + 5*R)/(2*R**2.5*(1 + R)**1.5)) + (2*np.arcsinh(np.sqrt(R)))/R**3\r\n elif params == [2,3,0]:\r\n return (-2*R - 3*R**3 + 2*(1 + R**2)**1.5*np.arcsinh(R))/(R**3*(1 + R**2)**1.5)\r\n\r\ndef F(E,params): # fitting form from Widrow 2000\r\n if params == [1,3,1]:\r\n l = 5./2\r\n F0 = 
9.1968e-2\r\n q = -2.7419\r\n p = np.array([.362,-.5639,-.0859,-.4912])\r\n elif params == [1,3,1.5]:\r\n l = 9./2\r\n F0 = 4.8598e-1\r\n q = -2.8216\r\n p = np.array([.3526,-5.199,3.5461,-.884])\r\n elif params == [2,3,0]:\r\n l = 0.\r\n F0 = 5.8807e-2\r\n q = -2.6312\r\n p = np.array([-3.7147,41.045,-132.2,216.9,-170.23,51.606])\r\n P = 0\r\n for i,pi in enumerate(p):\r\n P += pi*E**(i+1)\r\n return (\r\n F0*np.power(E,1.5,where=E>0)*np.power(1-E,-l,where=E<1)\r\n *np.where(E>0.99,1+(1-E)/2.+(1-E)**2/3.+(1-E)**3/4.+(1-E)**4/5.,np.divide(-np.log(E,where=E>0),1-E,where=E<1))**q\r\n *np.exp(P)\r\n )\r\n\r\ndef F_aniso(E,L,Ra):\r\n Q = E-L**2/(2*Ra**2)\r\n l = 5./2\r\n if Ra == 0.6:\r\n F0 = 1.0885e-1\r\n q = -1.0468\r\n p = np.array([-1.6805,18.360,-151.72,336.71,-288.09,85.472])\r\n elif Ra == 1:\r\n F0 = 3.8287e-2\r\n q = -1.0389\r\n p = np.array([0.3497,-12.253,-9.1225,101.15,-127.43,47.401])\r\n elif Ra == 3:\r\n F0 = 4.2486e-3\r\n q = -1.0385\r\n p = np.array([0.7577,-25.283,149.27,-282.53,229.13,-69.048])\r\n elif Ra == 10:\r\n F0 = 3.8951e-4\r\n q = -1.0447\r\n p = np.array([-2.2679,79.474,-237.74,329.07,-223.43,59.581])\r\n P = 0\r\n for i,pi in enumerate(p):\r\n P += pi*Q**(i+1)\r\n return (\r\n F0*np.power(Q,-.5,where=Q>0)*np.power(1-Q,-l,where=Q<1)\r\n *np.where(Q>0.99,1+(1-Q)/2.+(1-Q)**2/3.+(1-Q)**3/4.+(1-Q)**4/5.,np.divide(-np.log(Q,where=Q>0),1-Q,where=Q<1))**q\r\n *np.exp(P)\r\n )\r\n\r\ndef F_reduced(E,params): # fitting form from Widrow 2000\r\n if params == [1,3,1]:\r\n F0 = 9.1968e-2\r\n q = -2.7419\r\n p = np.array([.362,-.5639,-.0859,-.4912])\r\n elif params == [1,3,1.5]:\r\n F0 = 4.8598e-1\r\n q = -2.8216\r\n p = np.array([.3526,-5.199,3.5461,-.884])\r\n elif params == [2,3,0]:\r\n F0 = 5.8807e-2\r\n q = -2.6312\r\n p = np.array([-3.7147,41.045,-132.2,216.9,-170.23,51.606])\r\n P = 0\r\n for i,pi in enumerate(p):\r\n P += pi*E**(i+1)\r\n return (\r\n F0*np.power(E,1.5,where=E>0)\r\n *np.where(E>0.99,1+(1-E)/2.+(1-E)**2/3.+(1-E)**3/4.+(1-E)**4/5.,np.divide(-np.log(E,where=E>0),1-E,where=E<1))**q\r\n *np.exp(P)\r\n )\r\n\r\ndef F_lambda(params): # fitting form from Widrow 2000\r\n if params == [1,3,1]:\r\n return 5./2\r\n elif params == [1,3,1.5]:\r\n return 9./2\r\n elif params == [2,3,0]:\r\n return 0.\r\n"
] | [
[
"numpy.divide",
"numpy.array",
"numpy.log",
"numpy.exp",
"numpy.power",
"numpy.sqrt",
"numpy.arcsinh",
"scipy.special.spence"
]
] |
xmatthias/PyTables | [
"da01cf8908c2d8c2b07e8a35685f0811807453f6"
] | [
"tables/tests/common.py"
] | [
"\"\"\"Utilities for PyTables' test suites.\"\"\"\n\nimport os\nimport re\nimport sys\nimport locale\nimport platform\nimport tempfile\nfrom pathlib import Path\nfrom time import perf_counter as clock\nfrom packaging.version import Version\n\nimport unittest\n\nimport numexpr as ne\nimport numpy as np\n\nimport tables as tb\nfrom tables.req_versions import min_blosc_bitshuffle_version\n\nhdf5_version = Version(tb.hdf5_version)\nblosc_version = Version(tb.which_lib_version(\"blosc\")[1])\n\n\nverbose = os.environ.get(\"VERBOSE\", \"FALSE\") == \"TRUE\"\n\"\"\"Show detailed output of the testing process.\"\"\"\n\nheavy = False\n\"\"\"Run all tests even when they take long to complete.\"\"\"\n\nshow_memory = False\n\"\"\"Show the progress of memory consumption.\"\"\"\n\n\ndef parse_argv(argv):\n global verbose, heavy\n\n if 'verbose' in argv:\n verbose = True\n argv.remove('verbose')\n\n if 'silent' in argv: # take care of old flag, just in case\n verbose = False\n argv.remove('silent')\n\n if '--heavy' in argv:\n heavy = True\n argv.remove('--heavy')\n\n return argv\n\n\nzlib_avail = tb.which_lib_version(\"zlib\") is not None\nlzo_avail = tb.which_lib_version(\"lzo\") is not None\nbzip2_avail = tb.which_lib_version(\"bzip2\") is not None\nblosc_avail = tb.which_lib_version(\"blosc\") is not None\n\n\ndef print_heavy(heavy):\n if heavy:\n print(\"\"\"Performing the complete test suite!\"\"\")\n else:\n print(\"\"\"\\\nPerforming only a light (yet comprehensive) subset of the test suite.\nIf you want a more complete test, try passing the --heavy flag to this script\n(or set the 'heavy' parameter in case you are using tables.test() call).\nThe whole suite will take more than 4 hours to complete on a relatively\nmodern CPU and around 512 MB of main memory.\"\"\")\n print('-=' * 38)\n\n\ndef print_versions():\n \"\"\"Print all the versions of software that PyTables relies on.\"\"\"\n\n print('-=' * 38)\n print(\"PyTables version: %s\" % tb.__version__)\n print(\"HDF5 version: %s\" % tb.which_lib_version(\"hdf5\")[1])\n print(\"NumPy version: %s\" % np.__version__)\n tinfo = tb.which_lib_version(\"zlib\")\n if ne.use_vml:\n # Get only the main version number and strip out all the rest\n vml_version = ne.get_vml_version()\n vml_version = re.findall(\"[0-9.]+\", vml_version)[0]\n vml_avail = \"using VML/MKL %s\" % vml_version\n else:\n vml_avail = \"not using Intel's VML/MKL\"\n print(f\"Numexpr version: {ne.__version__} ({vml_avail})\")\n if tinfo is not None:\n print(f\"Zlib version: {tinfo[1]} (in Python interpreter)\")\n tinfo = tb.which_lib_version(\"lzo\")\n if tinfo is not None:\n print(\"LZO version: {} ({})\".format(tinfo[1], tinfo[2]))\n tinfo = tb.which_lib_version(\"bzip2\")\n if tinfo is not None:\n print(\"BZIP2 version: {} ({})\".format(tinfo[1], tinfo[2]))\n tinfo = tb.which_lib_version(\"blosc\")\n if tinfo is not None:\n blosc_date = tinfo[2].split()[1]\n print(\"Blosc version: {} ({})\".format(tinfo[1], blosc_date))\n blosc_cinfo = tb.blosc_get_complib_info()\n blosc_cinfo = [\n \"{} ({})\".format(k, v[1]) for k, v in sorted(blosc_cinfo.items())\n ]\n print(\"Blosc compressors: %s\" % ', '.join(blosc_cinfo))\n blosc_finfo = ['shuffle']\n if Version(tinfo[1]) >= tb.req_versions.min_blosc_bitshuffle_version:\n blosc_finfo.append('bitshuffle')\n print(\"Blosc filters: %s\" % ', '.join(blosc_finfo))\n try:\n from Cython import __version__ as cython_version\n print('Cython version: %s' % cython_version)\n except Exception:\n pass\n print('Python version: %s' % sys.version)\n 
print('Platform: %s' % platform.platform())\n # if os.name == 'posix':\n # (sysname, nodename, release, version, machine) = os.uname()\n # print('Platform: %s-%s' % (sys.platform, machine))\n print('Byte-ordering: %s' % sys.byteorder)\n print('Detected cores: %s' % tb.utils.detect_number_of_cores())\n print('Default encoding: %s' % sys.getdefaultencoding())\n print('Default FS encoding: %s' % sys.getfilesystemencoding())\n print('Default locale: (%s, %s)' % locale.getdefaultlocale())\n print('-=' * 38)\n\n # This should improve readability whan tests are run by CI tools\n sys.stdout.flush()\n\n\ndef test_filename(filename):\n from pkg_resources import resource_filename\n return resource_filename('tables.tests', filename)\n\n\ndef verbosePrint(string, nonl=False):\n \"\"\"Print out the `string` if verbose output is enabled.\"\"\"\n if not verbose:\n return\n if nonl:\n print(string, end=' ')\n else:\n print(string)\n\n\ndef allequal(a, b, flavor=\"numpy\"):\n \"\"\"Checks if two numerical objects are equal.\"\"\"\n\n # print(\"a-->\", repr(a))\n # print(\"b-->\", repr(b))\n if not hasattr(b, \"shape\"):\n # Scalar case\n return a == b\n\n if ((not hasattr(a, \"shape\") or a.shape == ()) and\n (not hasattr(b, \"shape\") or b.shape == ())):\n return a == b\n\n if a.shape != b.shape:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # Way to check the type equality without byteorder considerations\n if hasattr(b, \"dtype\") and a.dtype.str[1:] != b.dtype.str[1:]:\n if verbose:\n print(\"dtype is not equal:\", a.dtype, \"!=\", b.dtype)\n return 0\n\n # Rank-0 case\n if len(a.shape) == 0:\n if a[()] == b[()]:\n return 1\n else:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # null arrays\n if a.size == 0: # len(a) is not correct for generic shapes\n if b.size == 0:\n return 1\n else:\n if verbose:\n print(\"length is not equal\")\n print(\"len(a.data) ==>\", len(a.data))\n print(\"len(b.data) ==>\", len(b.data))\n return 0\n\n # Multidimensional case\n result = (a == b)\n result = np.all(result)\n if not result and verbose:\n print(\"Some of the elements in arrays are not equal\")\n\n return result\n\n\ndef areArraysEqual(arr1, arr2):\n \"\"\"Are both `arr1` and `arr2` equal arrays?\n\n Arguments can be regular NumPy arrays, chararray arrays or\n structured arrays (including structured record arrays). 
They are\n checked for type and value equality.\n\n \"\"\"\n\n t1 = type(arr1)\n t2 = type(arr2)\n\n if not ((hasattr(arr1, 'dtype') and arr1.dtype == arr2.dtype) or\n issubclass(t1, t2) or issubclass(t2, t1)):\n return False\n\n return np.all(arr1 == arr2)\n\n\nclass PyTablesTestCase(unittest.TestCase):\n def tearDown(self):\n super().tearDown()\n for key in self.__dict__:\n if self.__dict__[key].__class__.__name__ != 'instancemethod':\n self.__dict__[key] = None\n\n def _getName(self):\n \"\"\"Get the name of this test case.\"\"\"\n return self.id().split('.')[-2]\n\n def _getMethodName(self):\n \"\"\"Get the name of the method currently running in the test case.\"\"\"\n return self.id().split('.')[-1]\n\n def _verboseHeader(self):\n \"\"\"Print a nice header for the current test method if verbose.\"\"\"\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))\n\n def _checkEqualityGroup(self, node1, node2, hardlink=False):\n if verbose:\n print(\"Group 1:\", node1)\n print(\"Group 2:\", node2)\n if hardlink:\n self.assertTrue(\n node1._v_pathname != node2._v_pathname,\n \"node1 and node2 have the same pathnames.\")\n else:\n self.assertTrue(\n node1._v_pathname == node2._v_pathname,\n \"node1 and node2 does not have the same pathnames.\")\n self.assertTrue(\n node1._v_children == node2._v_children,\n \"node1 and node2 does not have the same children.\")\n\n def _checkEqualityLeaf(self, node1, node2, hardlink=False):\n if verbose:\n print(\"Leaf 1:\", node1)\n print(\"Leaf 2:\", node2)\n if hardlink:\n self.assertTrue(\n node1._v_pathname != node2._v_pathname,\n \"node1 and node2 have the same pathnames.\")\n else:\n self.assertTrue(\n node1._v_pathname == node2._v_pathname,\n \"node1 and node2 does not have the same pathnames.\")\n self.assertTrue(\n areArraysEqual(node1[:], node2[:]),\n \"node1 and node2 does not have the same values.\")\n\n\nclass TestFileMixin:\n h5fname = None\n open_kwargs = {}\n\n def setUp(self):\n super().setUp()\n self.h5file = tb.open_file(\n self.h5fname, title=self._getName(), **self.open_kwargs)\n\n def tearDown(self):\n \"\"\"Close ``h5file``.\"\"\"\n\n self.h5file.close()\n super().tearDown()\n\n\nclass TempFileMixin:\n open_mode = 'w'\n open_kwargs = {}\n\n def _getTempFileName(self):\n return tempfile.mktemp(prefix=self._getName(), suffix='.h5')\n\n def setUp(self):\n \"\"\"Set ``h5file`` and ``h5fname`` instance attributes.\n\n * ``h5fname``: the name of the temporary HDF5 file.\n * ``h5file``: the writable, empty, temporary HDF5 file.\n\n \"\"\"\n\n super().setUp()\n self.h5fname = self._getTempFileName()\n self.h5file = tb.open_file(\n self.h5fname, self.open_mode, title=self._getName(),\n **self.open_kwargs)\n\n def tearDown(self):\n \"\"\"Close ``h5file`` and remove ``h5fname``.\"\"\"\n\n self.h5file.close()\n self.h5file = None\n Path(self.h5fname).unlink() # comment this for debug only\n super().tearDown()\n\n def _reopen(self, mode='r', **kwargs):\n \"\"\"Reopen ``h5file`` in the specified ``mode``.\n\n Returns a true or false value depending on whether the file was\n reopenend or not. 
If not, nothing is changed.\n\n \"\"\"\n\n self.h5file.close()\n self.h5file = tb.open_file(self.h5fname, mode, **kwargs)\n return True\n\n\nclass ShowMemTime(PyTablesTestCase):\n tref = clock()\n \"\"\"Test for showing memory and time consumption.\"\"\"\n\n def test00(self):\n \"\"\"Showing memory and time consumption.\"\"\"\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")\n"
] | [
[
"numpy.all"
]
] |
Abdurehman458/libfacedetection.train.TF2 | [
"14e29bffb7a611b7df463561c90825305bddd18e"
] | [
"src/multibox_loss.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom utils import match, log_sum_exp\nfrom eiou import eiou_loss\n\nGPU = True\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label,\n neg_mining, neg_pos, neg_overlap, encode_target, rect_only):\n super(MultiBoxLoss, self).__init__()\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = [0.1, 0.2]\n self.rect_only = rect_only\n self.smooth_point = 0.2\n\n def forward(self, predictions, priors, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,14)\n priors shape: torch.size(num_priors,4)\n\n ground_truth (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,15] (last idx is the label).\n \"\"\"\n\n loc_data, conf_data, iou_data = predictions\n # print(loc_data)\n # print(conf_data)\n # print(iou_data)\n # exit()\n priors = priors\n num = loc_data.size(0)\n num_priors = (priors.size(0))\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n conf_t = torch.LongTensor(num, num_priors)\n iou_t = torch.Tensor(num, num_priors)\n for idx in range(num):\n truths = targets[idx][:, 0:4].data\n labels = targets[idx][:, -1].data\n # print(\"truths\",truths)\n # print(\"labels\",labels)\n defaults = priors.data\n # exit()\n iou_t[idx] = match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)\n # print(iou_t[idx],iou_t[idx].shape)\n # exit()\n iou_t = iou_t.view(num, num_priors, 1)\n # print(iou_t,iou_t.shape)\n # exit()\n # print(iou_t,iou_t.shape)\n # a =0\n # for x in range(iou_t.shape[1]):\n # if iou_t[:,x,:].numpy() > 0:\n # a +=1\n # if iou_t[:,x,:].numpy() >= 2:\n # print(iou_t[:,x,:].numpy(),x)\n # print(a)\n # print ((iou_t == 2).nonzero())\n # exit()\n if GPU:\n device = priors.get_device()\n loc_t = loc_t.cuda(device)\n conf_t = conf_t.cuda(device)\n iou_t = iou_t.cuda(device)\n\n pos = conf_t > 0\n # print(pos.shape)\n # print ((conf_t == 1).nonzero())\n # print ((pos == True).nonzero())\n # exit()\n # Localization Loss\n # Shape: [batch,num_priors,4]\n pos_idx = 
pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = eiou_loss(loc_p[:, 0:4], loc_t[:, 0:4], variance=self.variance, smooth_point=self.smooth_point, reduction='sum')\n # loss_lm = F.smooth_l1_loss(loc_p[:, 4:14], loc_t[:, 4:14], reduction='sum')\n # IoU diff\n pos_idx_ = pos.unsqueeze(pos.dim()).expand_as(iou_data)\n iou_p = iou_data[pos_idx_].view(-1, 1)\n iou_t = iou_t[pos_idx_].view(-1, 1)\n loss_iou = F.smooth_l1_loss(iou_p, iou_t, reduction='sum')\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n # print(batch_conf.gather(1, conf_t.view(-1, 1)))\n # exit()\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n # print(loss_c,loss_c.shape)\n # exit()\n # Hard Negative Mining\n # print(\"pos_r\",pos.view(-1, 1).shape)\n loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now\n loss_c = loss_c.view(num, -1)\n \n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')\n\n # Sum of losses\n N = max(num_pos.data.sum().float(), 1)\n loss_l /= N\n # loss_lm /= N\n loss_c /= N\n loss_iou /= N\n # return loss_l, loss_lm, loss_c, loss_iou\n return loss_l, loss_c, loss_iou\n"
] | [
[
"torch.LongTensor",
"torch.nn.functional.cross_entropy",
"torch.Tensor",
"torch.nn.functional.smooth_l1_loss"
]
] |
buaacjw/Epidemic-Modeling-survey | [
"d0065a2dd4c43eb1fe724d1facde2ec577ecfa94"
] | [
"visual/visual_graph.py"
] | [
"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom compartment.Graph import Graph\nfrom compartment.Model import Model\n\n\ndef visual_graph(graph: Graph):\n G = nx.MultiDiGraph()\n edge_list = []\n for node_name in graph.name2node.keys():\n node = graph.name2node[node_name]\n for next_name in node.next_name_list:\n edge_list.append((node_name, next_name))\n G.add_edges_from(edge_list)\n\n plt.figure(figsize=(8, 8))\n nx.draw(G, with_labels=True)\n plt.show()\n\n\ndef visual_model(model: Model):\n G = nx.MultiDiGraph()\n edge_list = []\n for node_name in model.name2compartments.keys():\n node = model.name2compartments[node_name].node\n for next_name in node.next_name_list:\n edge_list.append((node_name, next_name))\n G.add_edges_from(edge_list)\n\n plt.figure(figsize=(8, 8))\n nx.draw(G, with_labels=True)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
lod531/regPyHDFE | [
"7e44f7b859e902d5cb1a53f16f9576d08b507645"
] | [
"exploratory/utils.py"
] | [
"import numpy as np\nimport pandas as pd\n\ndef sklearn_to_df(sklearn_dataset):\n df = pd.DataFrame(sklearn_dataset.data, columns=sklearn_dataset.feature_names)\n df['target'] = pd.Series(sklearn_dataset.target)\n return df\n\ndef add_intercept(X):\n # X has to be a 2D numpy array\n # appends intercept as the last column\n intercept = np.ones(X.shape[0])\n return np.c_[X, intercept]\n\ndef get_np_columns(df, columns, intercept=False):\n # dataframe is a pandas datafram\n # columns is a list of column names\n # if intercept is true a column of 1s will be appended to the result matrix\n # returns columns as float64 matrix\n if columns == []:\n return None\n else:\n res = np.expand_dims(a=df[columns[0]].to_numpy().astype('float64'), axis=1)\n if len(columns) > 1:\n for name in columns[1:]:\n res = np.c_[res, np.expand_dims(a=df[name].to_numpy().astype('float64'), axis=1)]\n if intercept:\n res = add_intercept(res)\n return res\n"
] | [
[
"pandas.DataFrame",
"numpy.ones",
"pandas.Series"
]
] |
zhisbug/Megatron-LM | [
"b31e1296354e979722627a6c4dedafe19b51fa97"
] | [
"megatron/training.py"
] | [
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pretrain utilities.\"\"\"\n\nfrom datetime import datetime\nimport math\nimport sys\nimport time\n# The earliest we can measure the start time.\n_TRAIN_START_TIME = time.time()\n\nimport torch\nfrom torch.nn.parallel.distributed import DistributedDataParallel as torchDDP\n\nfrom megatron import get_args\nfrom megatron import get_timers\nfrom megatron import get_tensorboard_writer\nfrom megatron import get_current_global_batch_size\nfrom megatron import get_num_microbatches\nfrom megatron import is_last_rank\nfrom megatron import update_num_microbatches\nfrom megatron import mpu\nfrom megatron import print_rank_0\nfrom megatron import print_rank_last\nfrom megatron.checkpointing import load_checkpoint\nfrom megatron.checkpointing import save_checkpoint\nfrom megatron.model import Float16Module\nfrom megatron.model import ModelType\nfrom megatron.optimizer import get_megatron_optimizer\nfrom megatron.initialize import initialize_megatron\nfrom megatron.initialize import write_args_to_tensorboard\nfrom megatron.learning_rates import AnnealingLR\nfrom megatron.model import DistributedDataParallel as LocalDDP\nfrom megatron.utils import check_adlr_autoresume_termination\nfrom megatron.utils import unwrap_model\nfrom megatron.data.data_samplers import build_pretraining_data_loader\nfrom megatron.utils import calc_params_l2_norm\nfrom megatron.schedules import get_forward_backward_func\nfrom megatron.utils import report_memory\n\n\n\ndef print_datetime(string):\n \"\"\"Note that this call will sync across all ranks.\"\"\"\n torch.distributed.barrier()\n time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print_rank_0('[' + string + '] datetime: {} '.format(time_str))\n\n\ndef pretrain(train_valid_test_dataset_provider,\n model_provider,\n model_type,\n forward_step_func,\n extra_args_provider=None,\n args_defaults={}):\n \"\"\"Main training program.\n\n This function will run the followings in the order provided:\n 1) initialize Megatron.\n 2) setup model, optimizer and lr schedule using the model_provider.\n 3) call train_val_test_data_provider to get train/val/test datasets.\n 4) train the modle using the forward_step_func.\n\n Arguments:\n train_valid_test_dataset_provider: a function that takes the size of\n train/valid/test dataset and returns `train, valid, test` datasets.\n model_provider: a function that returns a vanilla version of the\n model. By vanilla we mean a simple model on cpu with no fp16 or ddp.\n model_type: an enum that specifies the type of model being trained.\n forward_step_func: a function that takes a `data iterator` and `model`,\n and returns a `loss` scalar with a dictionary with key:values being\n the info we would like to monitor during training, for example\n `lm-loss: value`. 
We also require that this function add\n `batch generator` to the timers class.\n extra_args_provider: a function that takes a parser and adds arguments\n to it. It is used for programs to add their own arguments.\n args_defaults: a dictionary from argument-name to argument-value. It\n to set already parse arguments.\n \"\"\"\n\n # Initalize and get arguments, timers, and Tensorboard writer.\n initialize_megatron(extra_args_provider=extra_args_provider,\n args_defaults=args_defaults)\n\n # Adjust the startup time so it reflects the largest value.\n # This will be closer to what scheduler will see (outside of\n # image ... launches.\n global _TRAIN_START_TIME\n start_time_tensor = torch.cuda.DoubleTensor([_TRAIN_START_TIME])\n torch.distributed.all_reduce(start_time_tensor,\n op=torch.distributed.ReduceOp.MIN)\n _TRAIN_START_TIME = start_time_tensor.item()\n print_rank_0('time to initialize megatron (seconds): {:.3f}'.format(\n time.time() - _TRAIN_START_TIME))\n print_datetime('after megatron is initialized')\n\n args = get_args()\n timers = get_timers()\n\n # Model, optimizer, and learning rate.\n timers('model-and-optimizer-setup').start()\n model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider,\n model_type)\n timers('model-and-optimizer-setup').stop()\n print_datetime('after model, optimizer, and learning rate '\n 'scheduler are built')\n\n # Data stuff.\n timers('train/valid/test-data-iterators-setup').start()\n if args.virtual_pipeline_model_parallel_size is not None:\n all_data_iterators = [\n build_train_valid_test_data_iterators(train_valid_test_dataset_provider)\n for _ in range(len(model))\n ]\n train_data_iterator = [data_iterators[0] for data_iterators in all_data_iterators]\n valid_data_iterator = [data_iterators[1] for data_iterators in all_data_iterators]\n test_data_iterator = [data_iterators[2] for data_iterators in all_data_iterators]\n else:\n train_data_iterator, valid_data_iterator, test_data_iterator \\\n = build_train_valid_test_data_iterators(\n train_valid_test_dataset_provider)\n timers('train/valid/test-data-iterators-setup').stop()\n print_datetime('after dataloaders are built')\n\n # Print setup timing.\n print_rank_0('done with setup ...')\n timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'])\n print_rank_0('training ...')\n\n iteration = 0\n if args.do_train and args.train_iters > 0:\n iteration = train(forward_step_func,\n model, optimizer, lr_scheduler,\n train_data_iterator, valid_data_iterator)\n print_datetime('after training is done')\n\n if args.do_valid:\n prefix = 'the end of training for val data'\n evaluate_and_print_results(prefix, forward_step_func,\n valid_data_iterator, model,\n iteration, False)\n\n if args.save and iteration != 0:\n save_checkpoint(iteration, model, optimizer, lr_scheduler)\n\n if args.do_test:\n # Run on test data.\n prefix = 'the end of training for test data'\n evaluate_and_print_results(prefix, forward_step_func,\n test_data_iterator, model,\n 0, True)\n\ndef update_train_iters(args):\n\n # For iteration-based training, we don't need to do anything\n if args.train_iters:\n return\n\n # Constant batch size with sample-based training.\n if args.rampup_batch_size is None:\n args.train_iters = args.train_samples // args.global_batch_size\n\n else:\n # Sample based training with rampup batch size.\n iterations = 0\n consumed_samples = 0\n # Rampup phase.\n while consumed_samples <= int(args.rampup_batch_size[2]):\n update_num_microbatches(consumed_samples, 
consistency_check=False)\n consumed_samples += get_current_global_batch_size()\n iterations += 1\n # Reset\n update_num_microbatches(0, consistency_check=False)\n # Constant phase\n # Note that we throw away any partial last batch.\n iterations += (args.train_samples - consumed_samples) // \\\n args.global_batch_size\n args.train_iters = iterations\n\n print_rank_0('setting training iterations to {}'.format(args.train_iters))\n\n\ndef get_model(model_provider_func, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=True):\n \"\"\"Build the model.\"\"\"\n args = get_args()\n args.model_type = model_type\n\n # Build model.\n if mpu.get_pipeline_model_parallel_world_size() > 1 and \\\n args.virtual_pipeline_model_parallel_size is not None:\n assert model_type != ModelType.encoder_and_decoder, \\\n \"Interleaved schedule not supported for model with both encoder and decoder\"\n model = []\n for i in range(args.virtual_pipeline_model_parallel_size):\n mpu.set_virtual_pipeline_model_parallel_rank(i)\n # Set pre_process and post_process only after virtual rank is set.\n pre_process = mpu.is_pipeline_first_stage()\n post_process = mpu.is_pipeline_last_stage()\n this_model = model_provider_func(\n pre_process=pre_process,\n post_process=post_process\n )\n this_model.model_type = model_type\n model.append(this_model)\n else:\n pre_process = mpu.is_pipeline_first_stage()\n post_process = mpu.is_pipeline_last_stage()\n add_encoder = True\n add_decoder = True\n if model_type == ModelType.encoder_and_decoder:\n if mpu.get_pipeline_model_parallel_world_size() > 1:\n assert args.pipeline_model_parallel_split_rank is not None, \\\n \"Split rank needs to be specified for model with both encoder and decoder\"\n rank = mpu.get_pipeline_model_parallel_rank()\n split_rank = args.pipeline_model_parallel_split_rank\n world_size = mpu.get_pipeline_model_parallel_world_size()\n pre_process = rank == 0 or rank == split_rank\n post_process = (rank == (split_rank - 1)) or (\n rank == (world_size - 1))\n add_encoder = mpu.is_pipeline_stage_before_split()\n add_decoder = mpu.is_pipeline_stage_after_split()\n model = model_provider_func(\n pre_process=pre_process,\n post_process=post_process,\n add_encoder=add_encoder,\n add_decoder=add_decoder)\n else:\n model = model_provider_func(\n pre_process=pre_process,\n post_process=post_process\n )\n model.model_type = model_type\n\n if not isinstance(model, list):\n model = [model]\n\n # Set tensor model parallel attributes if not set.\n # Only parameters that are already tensor model parallel have these\n # attributes set for them. 
We should make sure the default attributes\n # are set for all params so the optimizer can use them.\n for model_module in model:\n for param in model_module.parameters():\n mpu.set_defaults_if_not_set_tensor_model_parallel_attributes(param)\n\n # Print number of parameters.\n if mpu.get_data_parallel_rank() == 0:\n print(' > number of parameters on (tensor, pipeline) '\n 'model parallel rank ({}, {}): {}'.format(\n mpu.get_tensor_model_parallel_rank(),\n mpu.get_pipeline_model_parallel_rank(),\n sum([sum([p.nelement() for p in model_module.parameters()])\n for model_module in model])), flush=True)\n\n # GPU allocation.\n for model_module in model:\n model_module.cuda(torch.cuda.current_device())\n\n # Fp16 conversion.\n if args.fp16 or args.bf16:\n model = [Float16Module(model_module, args) for model_module in model]\n\n if wrap_with_ddp:\n if args.DDP_impl == 'torch':\n i = torch.cuda.current_device()\n model = [torchDDP(model_module, device_ids=[i], output_device=i,\n process_group=mpu.get_data_parallel_group())\n for model_module in model]\n\n elif args.DDP_impl == 'local':\n model = [LocalDDP(model_module,\n args.accumulate_allreduce_grads_in_fp32,\n args.use_contiguous_buffers_in_local_ddp)\n for model_module in model]\n\n else:\n raise NotImplementedError('Unknown DDP implementation specified: '\n '{}. Exiting.'.format(args.DDP_impl))\n\n return model\n\n\ndef get_learning_rate_scheduler(optimizer):\n \"\"\"Build the learning rate scheduler.\"\"\"\n args = get_args()\n\n # Iteration-based training.\n if args.train_iters:\n if args.lr_decay_iters is None:\n args.lr_decay_iters = args.train_iters\n decay_steps = args.lr_decay_iters * args.global_batch_size\n if args.lr_warmup_fraction is not None:\n warmup_steps = args.lr_warmup_fraction * decay_steps\n else:\n warmup_steps = args.lr_warmup_iters * args.global_batch_size\n # Sample-based training.\n elif args.train_samples:\n # We need to set training iters for later use. 
Technically\n # we need to adjust the training samples too (due to last\n # batch being incomplete) but we leave it as is for now.\n update_train_iters(args)\n if args.lr_decay_samples is None:\n args.lr_decay_samples = args.train_samples\n decay_steps = args.lr_decay_samples\n if args.lr_warmup_fraction is not None:\n warmup_steps = args.lr_warmup_fraction * decay_steps\n else:\n warmup_steps = args.lr_warmup_samples\n else:\n raise Exception(\n 'either train-iters or train-samples should be provided.')\n\n lr_scheduler = AnnealingLR(\n optimizer,\n max_lr=args.lr,\n min_lr=args.min_lr,\n warmup_steps=warmup_steps,\n decay_steps=decay_steps,\n decay_style=args.lr_decay_style,\n use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler,\n override_lr_scheduler=args.override_lr_scheduler)\n\n return lr_scheduler\n\n\ndef setup_model_and_optimizer(model_provider_func, model_type):\n \"\"\"Setup model and optimizer.\"\"\"\n args = get_args()\n\n model = get_model(model_provider_func, model_type)\n\n unwrapped_model = unwrap_model(model,\n (torchDDP, LocalDDP, Float16Module))\n optimizer = get_megatron_optimizer(unwrapped_model)\n\n lr_scheduler = get_learning_rate_scheduler(optimizer)\n\n if args.load is not None:\n timers = get_timers()\n # Extra barrier is added to make sure all ranks report the\n # max time.\n torch.distributed.barrier()\n timers('load-checkpoint').start()\n args.iteration = load_checkpoint(model, optimizer, lr_scheduler)\n torch.distributed.barrier()\n timers('load-checkpoint').stop()\n timers.log(['load-checkpoint'])\n else:\n args.iteration = 0\n\n # We only support local DDP with multiple micro-batches.\n if len(model) > 1 or mpu.get_pipeline_model_parallel_world_size() > 1:\n assert args.DDP_impl == 'local'\n\n # get model without FP16 and/or TorchDDP wrappers\n if args.iteration == 0 and len(unwrapped_model) == 1 \\\n and hasattr(unwrapped_model[0], 'init_state_dict_from_bert'):\n print_rank_0(\"Initializing ICT from pretrained BERT model\")\n unwrapped_model[0].init_state_dict_from_bert()\n if args.fp16:\n optimizer.reload_model_params()\n\n return model, optimizer, lr_scheduler\n\n\ndef train_step(forward_step_func, data_iterator,\n model, optimizer, lr_scheduler):\n \"\"\"Single training step.\"\"\"\n args = get_args()\n timers = get_timers()\n\n # Set grad to zero.\n if args.DDP_impl == 'local' and args.use_contiguous_buffers_in_local_ddp:\n for partition in model:\n partition.zero_grad_buffer()\n optimizer.zero_grad()\n\n forward_backward_func = get_forward_backward_func()\n losses_reduced = forward_backward_func(\n forward_step_func, data_iterator, model,\n optimizer, timers, forward_only=False)\n\n # Empty unused memory\n if args.empty_unused_memory_level >= 1:\n torch.cuda.empty_cache()\n\n # All-reduce if needed.\n if args.DDP_impl == 'local':\n timers('backward-params-all-reduce').start()\n for model_module in model:\n model_module.allreduce_gradients()\n timers('backward-params-all-reduce').stop()\n\n # All-reduce word_embeddings' grad across first and last stages to ensure\n # that word_embeddings parameters stay in sync.\n # This should only run for models that support pipelined model parallelism\n # (BERT and GPT-2).\n timers('backward-embedding-all-reduce').start()\n if mpu.is_rank_in_embedding_group(ignore_virtual=True) and \\\n mpu.get_pipeline_model_parallel_world_size() > 1:\n if mpu.is_pipeline_first_stage(ignore_virtual=True):\n unwrapped_model = model[0]\n elif mpu.is_pipeline_last_stage(ignore_virtual=True):\n unwrapped_model = model[-1]\n 
else: # We do not support the interleaved schedule for T5 yet.\n unwrapped_model = model[0]\n unwrapped_model = unwrap_model(\n unwrapped_model, (torchDDP, LocalDDP, Float16Module))\n\n if unwrapped_model.share_word_embeddings:\n word_embeddings_weight = unwrapped_model.word_embeddings_weight()\n if args.DDP_impl == 'local':\n grad = word_embeddings_weight.main_grad\n else:\n grad = word_embeddings_weight.grad\n torch.distributed.all_reduce(grad, group=mpu.get_embedding_group())\n timers('backward-embedding-all-reduce').stop()\n\n # Update parameters.\n timers('optimizer').start()\n update_successful, grad_norm, num_zeros_in_grad = optimizer.step()\n timers('optimizer').stop()\n\n # Update learning rate.\n if update_successful:\n increment = get_num_microbatches() * \\\n args.micro_batch_size * \\\n args.data_parallel_size\n lr_scheduler.step(increment=increment)\n skipped_iter = 0\n else:\n skipped_iter = 1\n\n # Empty unused memory\n if args.empty_unused_memory_level >= 2:\n torch.cuda.empty_cache()\n\n if mpu.is_pipeline_last_stage(ignore_virtual=True):\n # Average loss across microbatches.\n loss_reduced = {}\n for key in losses_reduced[0]:\n losses_reduced_for_key = [x[key] for x in losses_reduced]\n loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad\n return {}, skipped_iter, grad_norm, num_zeros_in_grad\n\n\ndef training_log(loss_dict, total_loss_dict, learning_rate, iteration,\n loss_scale, report_memory_flag, skipped_iter,\n grad_norm, params_norm, num_zeros_in_grad):\n \"\"\"Log training information such as losses, timing, ....\"\"\"\n args = get_args()\n timers = get_timers()\n writer = get_tensorboard_writer()\n\n # Advanced, skipped, and Nan iterations.\n advanced_iters_key = 'advanced iterations'\n skipped_iters_key = 'skipped iterations'\n nan_iters_key = 'nan iterations'\n # Advanced iterations.\n if not skipped_iter:\n total_loss_dict[advanced_iters_key] = total_loss_dict.get(\n advanced_iters_key, 0) + 1\n else:\n if advanced_iters_key not in total_loss_dict:\n total_loss_dict[advanced_iters_key] = 0\n # Skipped iterations.\n total_loss_dict[skipped_iters_key] = total_loss_dict.get(\n skipped_iters_key, 0) + skipped_iter\n # Update losses and set nan iterations\n got_nan = False\n for key in loss_dict:\n if not skipped_iter:\n total_loss_dict[key] = total_loss_dict.get(\n key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]\n else:\n value = loss_dict[key].float().sum().item()\n is_nan = value == float('inf') or \\\n value == -float('inf') or \\\n value != value\n got_nan = got_nan or is_nan\n total_loss_dict[nan_iters_key] = total_loss_dict.get(\n nan_iters_key, 0) + int(got_nan)\n\n # Logging.\n timers_to_log = []\n\n def add_to_logging(name):\n if name in timers.timers:\n timers_to_log.append(name)\n add_to_logging('forward-compute')\n add_to_logging('forward-recv')\n add_to_logging('forward-send')\n add_to_logging('forward-backward-send-forward-backward-recv')\n add_to_logging('backward-compute')\n add_to_logging('backward-recv')\n add_to_logging('backward-send')\n add_to_logging('backward-send-forward-recv')\n add_to_logging('backward-send-backward-recv')\n add_to_logging('backward-params-all-reduce')\n add_to_logging('backward-embedding-all-reduce')\n add_to_logging('optimizer-copy-to-main-grad')\n add_to_logging('optimizer-unscale-and-check-inf')\n add_to_logging('optimizer-clip-main-grad')\n add_to_logging('optimizer-copy-main-to-model-params')\n 
add_to_logging('optimizer')\n add_to_logging('batch-generator')\n\n # Calculate batch size.\n batch_size = args.micro_batch_size * args.data_parallel_size * \\\n get_num_microbatches()\n\n total_iterations = total_loss_dict[advanced_iters_key] + \\\n total_loss_dict[skipped_iters_key]\n\n # Tensorboard values.\n if writer and (iteration % args.tensorboard_log_interval == 0 ) and \\\n is_last_rank():\n if args.log_learning_rate_to_tensorboard:\n writer.add_scalar('learning-rate', learning_rate, iteration)\n writer.add_scalar('learning-rate vs samples', learning_rate,\n args.consumed_train_samples)\n if args.log_batch_size_to_tensorboard:\n writer.add_scalar('batch-size', batch_size, iteration)\n writer.add_scalar('batch-size vs samples', batch_size,\n args.consumed_train_samples)\n for key in loss_dict:\n writer.add_scalar(key , loss_dict[key], iteration)\n writer.add_scalar(key + ' vs samples', loss_dict[key],\n args.consumed_train_samples)\n if args.log_loss_scale_to_tensorboard:\n writer.add_scalar('loss-scale', loss_scale, iteration)\n writer.add_scalar('loss-scale vs samples', loss_scale,\n args.consumed_train_samples)\n if grad_norm is not None:\n writer.add_scalar('grad-norm', grad_norm, iteration)\n writer.add_scalar('grad-norm vs samples', grad_norm,\n args.consumed_train_samples)\n if num_zeros_in_grad is not None:\n writer.add_scalar('num-zeros', num_zeros_in_grad, iteration)\n writer.add_scalar('num-zeros vs samples', num_zeros_in_grad,\n args.consumed_train_samples)\n if params_norm is not None:\n writer.add_scalar('params-norm', params_norm, iteration)\n writer.add_scalar('params-norm vs samples', params_norm,\n args.consumed_train_samples)\n if args.log_timers_to_tensorboard:\n timers.write(timers_to_log, writer, iteration,\n normalizer=total_iterations)\n if args.log_memory_to_tensorboard:\n mem_stats = torch.cuda.memory_stats()\n writer.add_scalar(\n \"mem-reserved-bytes\",\n mem_stats[\"reserved_bytes.all.current\"],\n iteration,\n )\n writer.add_scalar(\n \"mem-allocated-bytes\",\n mem_stats[\"allocated_bytes.all.current\"],\n iteration,\n )\n writer.add_scalar(\n \"mem-allocated-count\",\n mem_stats[\"allocation.all.current\"],\n iteration,\n )\n\n if iteration % args.log_interval == 0:\n elapsed_time = timers('interval-time').elapsed()\n elapsed_time_per_iteration = elapsed_time / total_iterations\n if writer:\n if args.log_timers_to_tensorboard:\n writer.add_scalar('iteration-time',\n elapsed_time_per_iteration, iteration)\n log_string = ' iteration {:8d}/{:8d} |'.format(\n iteration, args.train_iters)\n log_string += ' consumed samples: {:12d} |'.format(\n args.consumed_train_samples)\n log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(\n elapsed_time_per_iteration * 1000.0)\n log_string += ' learning rate: {:.3E} |'.format(learning_rate)\n log_string += ' global batch size: {:5d} |'.format(batch_size)\n for key in total_loss_dict:\n if key not in [advanced_iters_key, skipped_iters_key,\n nan_iters_key]:\n avg = total_loss_dict[key].item() / \\\n float(max(1, total_loss_dict[advanced_iters_key]))\n if avg > 0.0:\n log_string += ' {}: {:.6E} |'.format(key, avg)\n total_loss_dict[key] = torch.cuda.FloatTensor([0.0])\n log_string += ' loss scale: {:.1f} |'.format(loss_scale)\n if grad_norm is not None:\n log_string += ' grad norm: {:.3f} |'.format(grad_norm)\n if num_zeros_in_grad is not None:\n log_string += ' num zeros: {:.1f} |'.format(num_zeros_in_grad)\n if params_norm is not None:\n log_string += ' params norm: {:.3f} |'.format(params_norm)\n 
log_string += ' number of skipped iterations: {:3d} |'.format(\n total_loss_dict[skipped_iters_key])\n log_string += ' number of nan iterations: {:3d} |'.format(\n total_loss_dict[nan_iters_key])\n total_loss_dict[advanced_iters_key] = 0\n total_loss_dict[skipped_iters_key] = 0\n total_loss_dict[nan_iters_key] = 0\n print_rank_last(log_string)\n if report_memory_flag and learning_rate > 0.:\n # Report memory after optimizer state has been initialized.\n report_memory('(after {} iterations)'.format(iteration))\n report_memory_flag = False\n timers.log(timers_to_log, normalizer=args.log_interval)\n\n return report_memory_flag\n\n\ndef save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler):\n timers = get_timers()\n # Extra barrier is added to make sure\n # all ranks report the max time.\n torch.distributed.barrier()\n timers('save-checkpoint').start()\n save_checkpoint(iteration, model, optimizer, lr_scheduler)\n torch.distributed.barrier()\n timers('save-checkpoint').stop()\n timers.log(['save-checkpoint'])\n\n\ndef train(forward_step_func, model, optimizer, lr_scheduler,\n train_data_iterator, valid_data_iterator):\n \"\"\"Train the model function.\"\"\"\n args = get_args()\n timers = get_timers()\n\n # Write args to tensorboard\n write_args_to_tensorboard()\n\n # Turn on training mode which enables dropout.\n for model_module in model:\n model_module.train()\n\n # Tracking loss.\n total_loss_dict = {}\n\n # Iterations.\n iteration = args.iteration\n\n timers('interval-time').start()\n print_datetime('before the start of training step')\n report_memory_flag = True\n while iteration < args.train_iters:\n update_num_microbatches(args.consumed_train_samples)\n loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \\\n train_step(forward_step_func,\n train_data_iterator,\n model,\n optimizer,\n lr_scheduler)\n iteration += 1\n args.consumed_train_samples += mpu.get_data_parallel_world_size() * \\\n args.micro_batch_size * \\\n get_num_microbatches()\n\n # Logging.\n loss_scale = optimizer.get_loss_scale().item()\n params_norm = None\n if args.log_params_norm:\n params_norm = calc_params_l2_norm(model)\n report_memory_flag = training_log(loss_dict, total_loss_dict,\n optimizer.param_groups[0]['lr'],\n iteration, loss_scale,\n report_memory_flag, skipped_iter,\n grad_norm, params_norm, num_zeros_in_grad)\n\n # Autoresume\n if args.adlr_autoresume and \\\n (iteration % args.adlr_autoresume_interval == 0):\n check_adlr_autoresume_termination(iteration, model, optimizer,\n lr_scheduler)\n\n # Evaluation\n if args.eval_interval and iteration % args.eval_interval == 0 and \\\n args.do_valid:\n prefix = 'iteration {}'.format(iteration)\n evaluate_and_print_results(prefix, forward_step_func,\n valid_data_iterator, model,\n iteration, False)\n\n # Checkpointing\n saved_checkpoint = False\n if args.save and args.save_interval and \\\n iteration % args.save_interval == 0:\n save_checkpoint_and_time(iteration, model, optimizer,\n lr_scheduler)\n saved_checkpoint = True\n\n # Exiting based on duration\n if args.exit_duration_in_mins:\n train_time = (time.time() - _TRAIN_START_TIME) / 60.0\n done_cuda = torch.cuda.IntTensor(\n [train_time > args.exit_duration_in_mins])\n torch.distributed.all_reduce(\n done_cuda, op=torch.distributed.ReduceOp.MAX)\n done = done_cuda.item()\n if done:\n if not saved_checkpoint:\n save_checkpoint_and_time(iteration, model, optimizer,\n lr_scheduler)\n print_datetime('exiting program after {} minutes'.format(train_time))\n sys.exit()\n\n # Exiting based on 
iterations\n if args.exit_interval and iteration % args.exit_interval == 0:\n if not saved_checkpoint:\n save_checkpoint_and_time(iteration, model, optimizer,\n lr_scheduler)\n torch.distributed.barrier()\n print_datetime('exiting program at iteration {}'.format(iteration))\n sys.exit()\n\n\n return iteration\n\n\ndef evaluate(forward_step_func, data_iterator, model, verbose=False):\n \"\"\"Evaluation.\"\"\"\n args = get_args()\n\n # Turn on evaluation mode which disables dropout.\n for model_module in model:\n model_module.eval()\n\n total_loss_dict = {}\n\n with torch.no_grad():\n iteration = 0\n while iteration < args.eval_iters:\n iteration += 1\n if verbose and iteration % args.log_interval == 0:\n print_rank_0('Evaluating iter {}/{}'.format(iteration,\n args.eval_iters))\n\n forward_backward_func = get_forward_backward_func()\n loss_dicts = forward_backward_func(\n forward_step_func, data_iterator, model, optimizer=None,\n timers=None, forward_only=True)\n\n # Empty unused memory\n if args.empty_unused_memory_level >= 1:\n torch.cuda.empty_cache()\n\n if mpu.is_pipeline_last_stage(ignore_virtual=True):\n # Reduce across processes.\n for loss_dict in loss_dicts:\n for key in loss_dict:\n total_loss_dict[key] = total_loss_dict.get(\n key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]\n\n args.consumed_valid_samples += mpu.get_data_parallel_world_size() \\\n * args.micro_batch_size \\\n * get_num_microbatches()\n # Move model back to the train mode.\n for model_module in model:\n model_module.train()\n\n for key in total_loss_dict:\n total_loss_dict[key] /= args.eval_iters * get_num_microbatches()\n\n return total_loss_dict\n\ndef evaluate_and_print_results(prefix, forward_step_func,\n data_iterator, model,\n iteration, verbose=False):\n \"\"\"Helper function to evaluate and dump results on screen.\"\"\"\n args = get_args()\n writer = get_tensorboard_writer()\n\n total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose)\n string = ' validation loss at {} | '.format(prefix)\n for key in total_loss_dict:\n string += '{} value: {:.6E} | '.format(key, total_loss_dict[key].item())\n ppl = math.exp(min(20, total_loss_dict[key].item()))\n string += '{} PPL: {:.6E} | '.format(key, ppl)\n if writer:\n writer.add_scalar('{} validation'.format(key),\n total_loss_dict[key].item(),\n iteration)\n writer.add_scalar('{} validation vs samples'.format(key),\n total_loss_dict[key].item(),\n args.consumed_train_samples)\n if args.log_validation_ppl_to_tensorboard:\n writer.add_scalar('{} validation ppl'.format(key), ppl,\n iteration)\n writer.add_scalar('{} validation ppl vs samples'.format(key),\n ppl, args.consumed_train_samples)\n\n length = len(string) + 1\n print_rank_last('-' * length)\n print_rank_last(string)\n print_rank_last('-' * length)\n\n\ndef cyclic_iter(iter):\n while True:\n for x in iter:\n yield x\n\ndef build_train_valid_test_data_iterators(\n build_train_valid_test_datasets_provider):\n \"\"\"XXX\"\"\"\n args = get_args()\n\n (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)\n\n print_rank_0('> building train, validation, and test datasets ...')\n\n # Backward compatibility, assume fixed batch size.\n if args.iteration > 0 and args.consumed_train_samples == 0:\n assert args.train_samples is None, \\\n 'only backward compatiblity support for iteration-based training'\n args.consumed_train_samples = args.iteration * args.global_batch_size\n if args.iteration > 0 and args.consumed_valid_samples == 0:\n if args.train_samples is None:\n 
args.consumed_valid_samples = (args.iteration // args.eval_interval) * \\\n args.eval_iters * args.global_batch_size\n\n # Data loader only on rank 0 of each model parallel group.\n if mpu.get_tensor_model_parallel_rank() == 0:\n\n # Number of train/valid/test samples.\n if args.train_samples:\n train_samples = args.train_samples\n else:\n train_samples = args.train_iters * args.global_batch_size\n eval_iters = (args.train_iters // args.eval_interval + 1) * \\\n args.eval_iters\n test_iters = args.eval_iters\n train_val_test_num_samples = [train_samples,\n eval_iters * args.global_batch_size,\n test_iters * args.global_batch_size]\n print_rank_0(' > datasets target sizes (minimum size):')\n print_rank_0(' train: {}'.format(train_val_test_num_samples[0]))\n print_rank_0(' validation: {}'.format(train_val_test_num_samples[1]))\n print_rank_0(' test: {}'.format(train_val_test_num_samples[2]))\n\n # Build the datasets.\n train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider(\n train_val_test_num_samples)\n\n # Build dataloders.\n train_dataloader = build_pretraining_data_loader(\n train_ds, args.consumed_train_samples)\n valid_dataloader = build_pretraining_data_loader(\n valid_ds, args.consumed_valid_samples)\n test_dataloader = build_pretraining_data_loader(test_ds, 0)\n\n # Flags to know if we need to do training/validation/testing.\n do_train = train_dataloader is not None and args.train_iters > 0\n do_valid = valid_dataloader is not None and args.eval_iters > 0\n do_test = test_dataloader is not None and args.eval_iters > 0\n # Need to broadcast num_tokens and num_type_tokens.\n flags = torch.cuda.LongTensor(\n [int(do_train), int(do_valid), int(do_test)])\n else:\n flags = torch.cuda.LongTensor([0, 0, 0])\n\n # Broadcast num tokens.\n torch.distributed.broadcast(flags,\n mpu.get_tensor_model_parallel_src_rank(),\n group=mpu.get_tensor_model_parallel_group())\n args.do_train = flags[0].item()\n args.do_valid = flags[1].item()\n args.do_test = flags[2].item()\n\n\n # Build iterators.\n dl_type = args.dataloader_type\n assert dl_type in ['single', 'cyclic']\n\n if train_dataloader is not None:\n train_data_iterator = iter(train_dataloader) if dl_type == 'single' \\\n else iter(cyclic_iter(train_dataloader))\n else:\n train_data_iterator = None\n\n if valid_dataloader is not None:\n valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \\\n else iter(cyclic_iter(valid_dataloader))\n else:\n valid_data_iterator = None\n\n if test_dataloader is not None:\n test_data_iterator = iter(test_dataloader) if dl_type == 'single' \\\n else iter(cyclic_iter(test_dataloader))\n else:\n test_data_iterator = None\n\n return train_data_iterator, valid_data_iterator, test_data_iterator\n"
] | [
[
"torch.no_grad",
"torch.cuda.LongTensor",
"torch.cuda.IntTensor",
"torch.cuda.current_device",
"torch.cuda.empty_cache",
"torch.distributed.all_reduce",
"torch.cuda.memory_stats",
"torch.cuda.FloatTensor",
"torch.distributed.barrier",
"torch.cuda.DoubleTensor"
]
] |
tcapelle/lrp | [
"26398ff91bbefd383624a74afb29e91074420df1"
] | [
"Plot.py"
] | [
"import networkx as nx\nimport matplotlib.pyplot as plt\n\ndef Plot_grafo(G,arcos,s): #grafo, si dibuja los arcos\n M=G.order()\n pos={}\n plt.figure(figsize=(10,10))\n t=0\n\n for n in G:\n pos[n]=(G.node[n]['x'],G.node[n]['y'])\n if G.node[n]['tipo']=='terminal':\n nx.draw_networkx_nodes(G,pos,nodelist=[n],\n node_size=500,\n node_color='r',\n alpha=0.7,\n cmap=plt.cm.Reds_r)\n nx.draw_networkx_labels(G,pos,{n:t},font_size=10)\n t=t+1;\n else:\n nx.draw_networkx_nodes(G,pos,nodelist=[n],\n node_size=300,\n node_color='w',\n cmap=plt.cm.Reds_r)\n nx.draw_networkx_labels(G,pos,{n:G.node[n]['tipo']},font_size=10)\n \n #nx.draw_networkx_labels(G,pos,{n:n}) #etiquetas reales\n \n if arcos:\n nx.draw_networkx_edges(G,pos, edge_color='gray', alpha=0.16)\n \n \n\n plt.xlim(-1,51)\n plt.ylim(-1,51)\n plt.axis('on')\n plt.savefig(s+'.png')\n #plt.show()\n \ndef Plot_ruta(G,R,t,s='AA'): #grafo G, ruta R, terminal t\n M=G.order()\n s=s+str((M-2)/2)\n pos={}\n plt.figure(figsize=(10,10))\n for n in G:\n pos[n]=(G.node[n]['x'],G.node[n]['y'])\n if G.node[n]['tipo']=='terminal':\n nx.draw_networkx_nodes(G,pos,nodelist=[n],\n node_size=500,\n node_color='r',\n alpha=0.7,\n cmap=plt.cm.Reds_r)\n nx.draw_networkx_labels(G,pos,{n:t},font_size=10)\n else:\n nx.draw_networkx_nodes(G,pos,nodelist=[n],\n node_size=300,\n node_color='w',\n cmap=plt.cm.Reds_r)\n nx.draw_networkx_labels(G,pos,{n:G.node[n]['tipo']},font_size=10)\n \n #nx.draw_networkx_labels(G,pos,{n:n}) #etiquetas reales\n #print R\n edges=[]\n anterior=R[0]\n Raux=R[1:len(R)]\n for n in Raux:\n edges.append((anterior,n))\n anterior=n \n #print edges\n nx.draw_networkx_edges(G,pos, edgelist=edges,edge_color='black',alpha=0.7)\n \n #plt.xlim(-0.05,1.05)\n #plt.ylim(-0.05,1.05)\n plt.axis('off')\n plt.savefig(s+'.png')\n plt.show()\n\n# def Plot_rutas(G,Rs,t,s='AA'): #grafo G, ruta R, terminal t\n# M=G.order()\n# s=s+str((M-2)/2)\n# pos={}\n# plt.figure(figsize=(10,10))\n# for n in G:\n# pos[n]=(G.node[n]['x'],G.node[n]['y'])\n# if G.node[n]['tipo']=='terminal':\n# nx.draw_networkx_nodes(G,pos,nodelist=[n],\n# node_size=500,\n# node_color='r',\n# alpha=0.7,\n# cmap=plt.cm.Reds_r)\n# nx.draw_networkx_labels(G,pos,{n:t},font_size=10)\n# else:\n# nx.draw_networkx_nodes(G,pos,nodelist=[n],\n# node_size=300,\n# node_color='w',\n# cmap=plt.cm.Reds_r)\n# nx.draw_networkx_labels(G,pos,{n:G.node[n]['tipo']},font_size=10)\n \n# #nx.draw_networkx_labels(G,pos,{n:n}) #etiquetas reales\n# #print R\n# edges=[]\n# anterior=R[0]\n# Raux=R[1:len(R)]\n# for n in Raux:\n# edges.append((anterior,n))\n# anterior=n \n# #print edges\n# nx.draw_networkx_edges(G,pos, edgelist=edges,edge_color='black',alpha=0.7)\n \n# #plt.xlim(-0.05,1.05)\n# #plt.ylim(-0.05,1.05)\n# plt.axis('off')\n# plt.savefig(s+'.png')\n# plt.show()\n "
] | [
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
] |
prise-3d/models-comparisons | [
"9152c705000cb0fb43928a61ca77318a30505c77"
] | [
"models.py"
] | [
"# models imports\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.feature_selection import RFECV\nimport sklearn.svm as svm\nfrom skrvm import RVC\n\n\n\ndef _get_best_model(X_train, y_train):\n\n Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]\n gammas = [0.001, 0.01, 0.1, 1, 5, 10, 100]\n param_grid = {'kernel':['rbf'], 'C': Cs, 'gamma' : gammas}\n\n svc = svm.SVC(probability=True, class_weight='balanced')\n clf = GridSearchCV(svc, param_grid, cv=10, verbose=1, scoring=\"accuracy\", n_jobs=-1)\n\n clf.fit(X_train, y_train)\n\n model = clf.best_estimator_\n\n return model\n\ndef _get_best_model_rvm(X_train, y_train):\n\n Cs = [0.001, 0.01, 0.1, 1, 10, 100]\n #Cs = [1, 2, 4, 8, 16, 32]\n gammas = [0.001, 0.01, 0.1, 1, 10, 100]\n #param_grid = {'kernel':['rbf'], 'C': Cs, 'gamma' : gammas}\n param_grid = {'kernel':['rbf', 'linear'], 'gamma' : gammas, 'C': Cs}\n\n rvc = RVC()\n clf = GridSearchCV(rvc, param_grid, scoring='accuracy', cv=5, verbose=2, n_jobs=-1)\n #cv=10, scoring='accuracy', verbose=2)\n\n clf.fit(X_train, y_train)\n\n model = clf.best_estimator_\n\n return model\n\n# def gpu_svm_model(X_train, y_train):\n# clf = SVC()\n# return clf.fit(X_train, y_train)\n\ndef svm_model(X_train, y_train):\n\n return _get_best_model(X_train, y_train)\n\ndef rvm_model(X_train, y_train):\n\n return _get_best_model_rvm(X_train, y_train)\n\n\ndef ensemble_model(X_train, y_train):\n\n svm_model = _get_best_model(X_train, y_train)\n\n lr_model = LogisticRegression(solver='liblinear', multi_class='ovr', random_state=1)\n rf_model = RandomForestClassifier(n_estimators=100, random_state=1)\n\n ensemble_model = VotingClassifier(estimators=[\n ('svm', svm_model), ('lr', lr_model), ('rf', rf_model)], voting='soft', weights=[1,1,1])\n\n ensemble_model.fit(X_train, y_train)\n\n return ensemble_model\n\n\ndef ensemble_model_v2(X_train, y_train):\n\n svm_model = _get_best_model(X_train, y_train)\n knc_model = KNeighborsClassifier(n_neighbors=2)\n gbc_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)\n lr_model = LogisticRegression(solver='liblinear', multi_class='ovr', random_state=1)\n rf_model = RandomForestClassifier(n_estimators=100, random_state=1)\n\n ensemble_model = VotingClassifier(estimators=[\n ('lr', lr_model),\n ('knc', knc_model),\n ('gbc', gbc_model),\n ('svm', svm_model),\n ('rf', rf_model)],\n voting='soft', weights=[1, 1, 1, 1, 1])\n\n ensemble_model.fit(X_train, y_train)\n\n return ensemble_model\n\n\n\ndef get_trained_model(choice, X_train, y_train):\n\n if choice == 'svm_model':\n return svm_model(X_train, y_train)\n\n if choice == 'rvm_model':\n return rvm_model(X_train, y_train)\n\n if choice == 'ensemble_model':\n return ensemble_model(X_train, y_train)\n\n if choice == 'ensemble_model_v2':\n return ensemble_model_v2(X_train, y_train)"
] | [
[
"sklearn.ensemble.VotingClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.GridSearchCV",
"sklearn.ensemble.GradientBoostingClassifier"
]
] |
j-bac/id-concentration | [
"372bacc846d3d5dd5f99178863fa214fb8d3b292"
] | [
"estimators/_DANCo.py"
] | [
"### Credits to Gabriele Lombardi\n### https://fr.mathworks.com/matlabcentral/fileexchange/40112-intrinsic-dimensionality-estimation-techniques\n### for the original MATLAB implementation\n\n### Credits to Kerstin Johnsson\n### https://cran.r-project.org/web/packages/intrinsicDimension/index.html\n### for the R implementation\n\nimport sys\nimport numpy as np\nimport pickle\nfrom sklearn.neighbors import NearestNeighbors\nfrom scipy.optimize import minimize\nfrom scipy.special import i0,i1,digamma,gammainc\nfrom scipy.interpolate import interp1d,interp2d\nfrom ._commonfuncs import binom_coeff, get_nn, randsphere, lens, indnComb\nfrom pathlib import Path\npath_to_estimators = str(Path(__file__).resolve().parent)\n\n\ndef KL(nocal, caldat, k):\n kld = KLd(nocal['dhat'], caldat['dhat'], k)\n klnutau = KLnutau(nocal['mu_nu'], caldat['mu_nu'],\n nocal['mu_tau'], caldat['mu_tau'])\n #print(klnutau)\n return(kld + klnutau)\n\ndef KLd(dhat, dcal, k):\n H_k = np.sum(1/np.arange(1,k+1)) \n quo = dcal/dhat\n a = np.power(-1,np.arange(k+1))*np.array(list(binom_coeff(k,i) for i in range(k+1)))*digamma(1 + np.arange(k+1)/quo)\n return(H_k*quo - np.log(quo) - (k-1)*np.sum(a))\n\ndef KLnutau(nu1, nu2, tau1, tau2):\n return(np.log(min(sys.float_info.max,i0(tau2))/min(sys.float_info.max,i0(tau1))) + \n min(sys.float_info.max,i1(tau1))/min(sys.float_info.max,i0(tau1))*(tau1 - tau2*np.cos(nu1-nu2)))\n\ndef nlld(d, rhos, k, N):\n return(-lld(d, rhos, k, N))\n\ndef lld(d, rhos, k, N):\n if (d == 0):\n return(np.array([-1e30]))\n else:\n return N*np.log(k*d) + (d-1)*np.sum(np.log(rhos)) + (k-1)*np.sum(np.log(1-rhos**d))\n \ndef nlld_gr(d, rhos, k, N):\n if (d == 0):\n return(np.array([-1e30]))\n else:\n return -(N/d + np.sum(np.log(rhos) - (k-1)*(rhos**d)*np.log(rhos)/(1 - rhos**d)))\n\ndef MIND_MLk(rhos, k, D):\n N = len(rhos)\n d_lik = np.array([np.nan]*D)\n for d in range(D):\n d_lik[d] = lld(d, rhos, k, N)\n return(np.argmax(d_lik))\n\ndef MIND_MLi(rhos, k, D, dinit):\n res = minimize(fun=nlld,\n x0=np.array([dinit]),\n jac=nlld_gr,\n args=(rhos, k, len(rhos)),\n method = 'L-BFGS-B',\n bounds=[(0,D)])\n\n #if(!is.null(res$message)) print(res$message)\n return(res['x']) \n\n\ndef MIND_MLx(data, k, D, ver):\n nbh_data,idx = get_nn(data, k+1)\n rhos = nbh_data[:,0]/nbh_data[:,-1]\n \n d_MIND_MLk = MIND_MLk(rhos, k, D)\n if (ver == 'MIND_MLk'):\n return(d_MIND_MLk)\n\n d_MIND_MLi = MIND_MLi(rhos, k, D, d_MIND_MLk)\n if (ver == 'MIND_MLi'):\n return(d_MIND_MLi)\n else:\n raise ValueError(\"Unknown version: \", ver)\n\ndef Ainv(eta):\n if (eta < .53):\n return(2*eta + eta**3 + 5*(eta**5)/6)\n elif (eta < .85):\n return(-.4 + 1.39*eta + .43/(1-eta))\n else:\n return(1/((eta**3)-4*(eta**2)+3*eta))\n\ndef loc_angles(pt, nbs):\n vec = nbs-pt\n # if(len(pt) == 1):\n # vec = vec.T\n vec_len = lens(vec)\n combs = indnComb(len(nbs), 2).T\n sc_prod = np.sum(vec[combs[0,:]]*vec[combs[1,:]],axis=1)\n #if (length(pt) == 1) {\n #print(sc.prod)\n #print((vec.len[combs[1, ]]*vec.len[combs[2, ]]))\n #}\n cos_th = sc_prod/(vec_len[combs[0,:]]*vec_len[combs[1,:]])\n if (any(abs(cos_th) > 1)):\n print(cos_th[np.abs(cos_th) > 1])\n return(np.arccos(cos_th))\n\ndef angles(data, nbs):\n N = len(data)\n k = nbs.shape[1]\n \n thetas = np.zeros((N, binom_coeff(k, 2)))\n for i in range(N):\n nb_data = data[nbs[i, ],]\n thetas[i, ] = loc_angles(data[i, ], nb_data) \n return(thetas)\n\ndef ML_VM(thetas):\n sinth = np.sin(thetas)\n costh = np.cos(thetas)\n nu = np.arctan(np.sum(sinth)/np.sum(costh))\n eta = np.sqrt(np.mean(costh)**2 + 
np.mean(sinth)**2)\n tau = Ainv(eta)\n return dict(nu = nu, tau = tau)\n\n\ndef dancoDimEstNoCalibration(data, k, D, n_jobs=1):\n nbh_data,idx = get_nn(data, k+1,n_jobs=n_jobs)\n rhos = nbh_data[:,0]/nbh_data[:,-1]\n d_MIND_MLk = MIND_MLk(rhos, k, D)\n d_MIND_MLi = MIND_MLi(rhos, k, D, d_MIND_MLk)\n\n thetas = angles(data, idx[:,:k])\n ml_vm = list(map(ML_VM,thetas))\n mu_nu = np.mean([i['nu'] for i in ml_vm])\n mu_tau = np.mean([i['tau'] for i in ml_vm])\n if(data.shape[1] == 1):\n mu_tau = 1\n \n return dict(dhat = d_MIND_MLi, mu_nu = mu_nu, mu_tau = mu_tau)\n\ndef DancoCalibrationData(k, N):\n me = dict(k = k,\n N = N,\n calibration_data = list(),\n maxdim = 0) \n return(me)\n\ndef increaseMaxDimByOne(dancoCalDat):\n newdim = dancoCalDat['maxdim'] + 1\n MIND_MLx_maxdim = newdim*2+5\n dancoCalDat['calibration_data'].append(dancoDimEstNoCalibration(randsphere(dancoCalDat['N'], newdim,1,center=[0]*newdim)[0], \n dancoCalDat['k'], \n MIND_MLx_maxdim))\n dancoCalDat['maxdim'] = newdim\n return(dancoCalDat)\n \ndef increaseMaxDimByOne_precomputedSpline(dancoCalDat,DANCo_splines):\n newdim = dancoCalDat['maxdim'] + 1\n dancoCalDat['calibration_data'].append({'dhat':DANCo_splines['spline_dhat'](newdim,dancoCalDat['N']),\n 'mu_nu':DANCo_splines['spline_mu'](newdim,dancoCalDat['N']),\n 'mu_tau':DANCo_splines['spline_tau'](newdim,dancoCalDat['N'])})\n dancoCalDat['maxdim'] = newdim\n return(dancoCalDat)\n\ndef computeDANCoCalibrationData(k,N,D):\n cal=DancoCalibrationData(k,N)\n while (cal['maxdim'] < D):\n if cal['maxdim']%10==0:\n print(cal['maxdim'])\n cal = increaseMaxDimByOne(cal)\n return cal\n\n\ndef dancoDimEst(data, k, D, ver = 'DANCo', fractal = True, calibration_data = None):\n \n cal = calibration_data\n N = len(data)\n \n if cal is not None:\n if (cal['k'] != k):\n raise ValueError(\"Neighborhood parameter k = %s does not agree with neighborhood parameter of calibration data, cal$k = %s\",\n k, cal['k'])\n if (cal['N'] != N):\n raise ValueError(\"Number of data points N = %s does not agree with number of data points of calibration data, cal$N = %s\",\n N, cal['N'])\n \n if (ver != 'DANCo' and ver != 'DANCoFit'):\n return(MIND_MLx(data, k, D, ver))\n \n nocal = dancoDimEstNoCalibration(data, k, D)\n if any(np.isnan(val) for val in nocal.values()):\n return dict(de=np.nan, kl_divergence = np.nan, calibration_data=cal)\n\n if (cal is None):\n cal = DancoCalibrationData(k, N)\n\n if (cal['maxdim'] < D): \n \n if ver == 'DANCoFit':\n print(\"Generating DANCo calibration data from precomputed spline interpolation for cardinality 50 to 5000, k = 10, dimensions 1 to 100\")\n\n #load precomputed splines as a function of dimension and dataset cardinality\n DANCo_splines = {}\n for spl in ['spline_dhat','spline_mu','spline_tau']:\n with open(path_to_estimators+'/DANCoFit/DANCo_'+spl+'.pkl', 'rb') as f:\n DANCo_splines[spl]=pickle.load(f)\n #compute interpolated statistics\n while (cal['maxdim'] < D):\n cal = increaseMaxDimByOne_precomputedSpline(cal,DANCo_splines)\n \n else:\n print(\"Computing DANCo calibration data for N = {}, k = {} for dimensions {} to {}\".format(N, k, cal['maxdim']+1, D))\n \n #compute statistics\n while (cal['maxdim'] < D):\n cal = increaseMaxDimByOne(cal)\n \n\n kl = np.array([np.nan]*D) \n for d in range(D) :\n kl[d] = KL(nocal, cal['calibration_data'][d], k) \n\n de = np.argmin(kl)+1\n \n if fractal:\n # Fitting with a cubic smoothing spline:\n f=interp1d(np.arange(1,D+1),kl,kind='cubic')\n # Locating the minima:\n de_fractal=minimize(f, de, 
bounds=[(1,D+1)],tol=1e-3)['x']\n return dict(de=de_fractal, kl_divergence = kl[de-1], calibration_data = cal)\n else:\n return dict(de=de, kl_divergence = kl[de-1], calibration_data = cal)\n"
] | [
[
"numpy.sin",
"numpy.array",
"numpy.arccos",
"numpy.isnan",
"numpy.argmin",
"numpy.log",
"scipy.special.i0",
"numpy.sum",
"numpy.mean",
"numpy.argmax",
"numpy.arange",
"numpy.cos",
"numpy.abs",
"scipy.optimize.minimize",
"scipy.special.i1"
]
] |
skydooms/tpu | [
"4553e1ed26763769768b9ba6744431908f7e37c0"
] | [
"models/experimental/resnet50_keras/resnet50_ctl_tf1.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"ResNet-50 implemented with Keras running on Cloud TPUs.\n\nThis file shows how you can run ResNet-50 on a Cloud TPU using the TensorFlow\nKeras support. This is configured for ImageNet (e.g. 1000 classes), but you can\neasily adapt to your own datasets by changing the code appropriately.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow.compat.v1 as tf\n\nimport imagenet_input\nimport resnet_model\n\n\n# Common flags for TPU models.\nflags.DEFINE_string('tpu', None, 'Name of the TPU to use.')\nflags.DEFINE_string('data', None, 'Path to training and testing data.')\nflags.DEFINE_string(\n 'model_dir', None,\n ('The directory where the model weights and training/evaluation summaries '\n 'are stored. If not specified, save to /tmp/resnet50.'))\nflags.DEFINE_integer('num_cores', 8, 'Number of TPU cores.')\nFLAGS = flags.FLAGS\n\n# Imagenet training and test data sets.\nAPPROX_IMAGENET_TRAINING_IMAGES = 1281167 # Number of images in ImageNet-1k train dataset.\nIMAGENET_VALIDATION_IMAGES = 50000 # Number of eval images.\nPER_CORE_BATCH_SIZE = 128\nNUM_CLASSES = 1000\n\n# Training hyperparameters.\n_EPOCHS = 90\n_USE_BFLOAT16 = True\n_BASE_LEARNING_RATE = 0.4\nDEFAULT_MODEL_DIR = '/tmp/resnet50'\n_WEIGHTS_TXT = 'resnet50_weights'\n\n# Allow overriding epochs, steps_per_epoch for testing\nflags.DEFINE_integer('num_epochs', _EPOCHS, '')\nflags.DEFINE_integer(\n 'steps_per_epoch', None,\n 'Steps for epoch during training. 
If unspecified, use default value.')\n\n# Learning rate schedule\n_LR_SCHEDULE = [ # (multiplier, epoch to start) tuples\n (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)\n]\n\n\ndef compute_learning_rate(lr_epoch):\n \"\"\"Learning rate for each step.\"\"\"\n warmup_lr_multiplier, warmup_end_epoch = _LR_SCHEDULE[0]\n if lr_epoch < warmup_end_epoch:\n # Learning rate increases linearly per step.\n return (_BASE_LEARNING_RATE * warmup_lr_multiplier *\n lr_epoch / warmup_end_epoch)\n for mult, start_epoch in _LR_SCHEDULE:\n if lr_epoch >= start_epoch:\n learning_rate = _BASE_LEARNING_RATE * mult\n else:\n break\n return learning_rate\n\n\ndef main(unused_argv):\n\n model_dir = FLAGS.model_dir if FLAGS.model_dir else DEFAULT_MODEL_DIR\n batch_size = PER_CORE_BATCH_SIZE * FLAGS.num_cores\n steps_per_epoch = FLAGS.steps_per_epoch or (int(\n APPROX_IMAGENET_TRAINING_IMAGES // batch_size))\n steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size\n\n logging.info('Saving checkpoints at %s', model_dir)\n\n logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local')\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.experimental.TPUStrategy(resolver)\n\n imagenet_train = imagenet_input.ImageNetInput(\n is_training=True,\n data_dir=FLAGS.data,\n batch_size=batch_size,\n use_bfloat16=_USE_BFLOAT16)\n imagenet_eval = imagenet_input.ImageNetInput(\n is_training=False,\n data_dir=FLAGS.data,\n batch_size=batch_size,\n use_bfloat16=_USE_BFLOAT16)\n\n train_iterator = strategy.experimental_distribute_dataset(\n imagenet_train.input_fn()).make_initializable_iterator()\n test_iterator = strategy.experimental_distribute_dataset(\n imagenet_eval.input_fn()).make_initializable_iterator()\n\n with strategy.scope():\n logging.info('Building Keras ResNet-50 model')\n model = resnet_model.ResNet50(num_classes=NUM_CLASSES)\n optimizer = tf.keras.optimizers.SGD(\n learning_rate=_BASE_LEARNING_RATE, momentum=0.9, nesterov=True)\n training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)\n training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n 'training_accuracy', dtype=tf.float32)\n test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n 'test_accuracy', dtype=tf.float32)\n logging.info('Finished building Keras ResNet-50 model')\n\n def train_step(inputs):\n \"\"\"Training StepFn.\"\"\"\n images, labels = inputs\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n\n # Loss calculations.\n #\n # Part 1: Prediciton loss.\n prediction_loss = tf.keras.losses.sparse_categorical_crossentropy(\n labels, predictions)\n loss1 = tf.reduce_mean(prediction_loss)\n # Part 2: Model weights regularization\n loss2 = tf.reduce_sum(model.losses)\n\n # Scale the loss given the TPUStrategy will reduce sum all gradients.\n loss = loss1 + loss2\n loss = loss / strategy.num_replicas_in_sync\n\n grads = tape.gradient(loss, model.trainable_variables)\n update_vars = optimizer.apply_gradients(\n zip(grads, model.trainable_variables))\n update_loss = training_loss.update_state(loss)\n update_accuracy = training_accuracy.update_state(labels, predictions)\n with tf.control_dependencies([update_vars, update_loss, update_accuracy]):\n return tf.identity(loss)\n\n def test_step(inputs):\n \"\"\"Evaluation StepFn.\"\"\"\n images, labels = inputs\n predictions = model(images, training=False)\n loss = 
tf.keras.losses.sparse_categorical_crossentropy(labels, predictions)\n loss = tf.reduce_mean(loss) / strategy.num_replicas_in_sync\n update_loss = test_loss.update_state(loss)\n update_accuracy = test_accuracy.update_state(labels, predictions)\n with tf.control_dependencies([update_loss, update_accuracy]):\n return tf.identity(loss)\n\n dist_train = strategy.experimental_local_results(\n strategy.experimental_run_v2(train_step, args=(next(train_iterator),)))\n dist_test = strategy.experimental_local_results(\n strategy.experimental_run_v2(test_step, args=(next(test_iterator),)))\n\n training_loss_result = training_loss.result()\n training_accuracy_result = training_accuracy.result()\n test_loss_result = test_loss.result()\n test_accuracy_result = test_accuracy.result()\n\n train_iterator_init = train_iterator.initialize()\n test_iterator_init = test_iterator.initialize()\n\n config = tf.ConfigProto()\n config.allow_soft_placement = True\n cluster_spec = resolver.cluster_spec()\n if cluster_spec:\n config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n with tf.Session(target=resolver.master(), config=config) as sess:\n all_variables = (\n tf.global_variables() +\n training_loss.variables + training_accuracy.variables +\n test_loss.variables + test_accuracy.variables)\n sess.run([v.initializer for v in all_variables])\n sess.run(train_iterator_init)\n\n for epoch in range(0, FLAGS.num_epochs):\n logging.info('Starting to run epoch: %s', epoch)\n for step in range(steps_per_epoch):\n learning_rate = compute_learning_rate(epoch + 1 +\n (float(step) / steps_per_epoch))\n sess.run(optimizer.lr.assign(learning_rate))\n if step % 20 == 0:\n logging.info('Learning rate at step %s in epoch %s is %s', step,\n epoch, learning_rate)\n sess.run(dist_train)\n if step % 20 == 0:\n logging.info('Training loss: %s, accuracy: %s%%',\n round(sess.run(training_loss_result), 4),\n round(sess.run(training_accuracy_result) * 100, 2))\n training_loss.reset_states()\n training_accuracy.reset_states()\n\n sess.run(test_iterator_init)\n for step in range(steps_per_eval):\n if step % 20 == 0:\n logging.info('Starting to run eval step %s of epoch: %s', step,\n epoch)\n sess.run(dist_test)\n if step % 20 == 0:\n logging.info('Test loss: %s, accuracy: %s%%',\n round(sess.run(test_loss_result), 4),\n round(sess.run(test_accuracy_result) * 100, 2))\n test_loss.reset_states()\n test_accuracy.reset_states()\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n app.run(main)\n"
] | [
[
"tensorflow.compat.v1.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.compat.v1.keras.losses.sparse_categorical_crossentropy",
"tensorflow.compat.v1.distribute.experimental.TPUStrategy",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.keras.metrics.Mean",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.tpu.experimental.initialize_tpu_system",
"tensorflow.compat.v1.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.GradientTape",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.keras.optimizers.SGD",
"tensorflow.compat.v1.reduce_mean"
]
] |
kewitz/SAET2014 | [
"1601cb22ca7d221f42c7ba9759504a3ce13ccd27"
] | [
"File.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThe MIT License (MIT)\nCopyright (c) 2014 Leonardo Kewitz\n\nExemplo de leitura e processamento de um arquivo CSV no Python. Neste exemplo\num arquivo obtido no analisador de espéctro é lido, convertido e plotado.\n\"\"\"\n\n# Importa bibliotecas necessárias.\nfrom numpy import *\nimport matplotlib.pyplot as plt\n\n# Com o arquivo CSV aberto na variável \"f\"...\nwith open(\"./src/FSP_RADIO.CSV\") as f:\n # Leia todas as linhas e as separe nos caracteres \";\"\n lines = [l.strip().split(';') for l in f.readlines()]\n # Transforma essas linhas num array Numpy, para trabalharmos depois.\n lines = array(lines)\n\n# Cria um array chamado \"famp\" convertendo as 2 primeiras colunas de lines para\n# ponto flutuante. No caso teremos um array (501,2) onde as colunas são respect-\n# ivamente frequência e amplitude do sinal capturado no analisador de espectro.\nfamp = array([[float(a[0]), float(a[1])] for a in lines[:,:2]])\n\n# Plota um gráfico frequência x amplitude.\nplt.plot(famp[:,0], famp[:,1])\n# Adiciona grade no gráfico.\nplt.grid()\n\n# Vamos criar o eixo X a partir de 7 pontos equidistantes nas frequências.\nticks = famp[::len(lines)//7,0]\n# Vamos formatar para MHz\nx = [\"{0:.2f}\".format(t/1E6) for t in ticks]\n# Seta o novo eixo X.\nplt.xticks(ticks, x)\n# Adiciona nome para os eixos.\nplt.xlabel(\"Frequência [MHz]\")\nplt.ylabel(\"Potência [dBm]\")\n\n# Destaca algumas frequências conhecidas...\nplt.axvline(107.1E6, label=\"FURB\", color=\"orange\")\nplt.axvline(102.7E6, label=\"Atl.\", color=\"red\")\nplt.axvline(105.3E6, label=\"Diplo.\", color=\"blue\")\n\n# Plota legenda.\nplt.legend(fontsize=\"medium\", fancybox=True, framealpha=.5, ncol=3, loc=9)\n"
] | [
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.xticks"
]
] |
ThanapolKong/Streamlit | [
"f8f3d990f012e2d3d516e5de8ad96f1ad4b63763"
] | [
"streamlit_app.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An example of showing geographic data.\"\"\"\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport pydeck as pdk\nfrom PIL import Image\n\n\n# SETTING PAGE CONFIG TO WIDE MODE\n\nst.set_page_config(layout=\"wide\")\nst.title(\"By Thanapol Kuharungroj 6130811321\")\n\n# SETTING Sidebar\nst.sidebar.header(\"Please select here\")\ndate_select = st.sidebar.selectbox(\"Date Selection\",(\"1-Jan-2019\", \"2-Jan-2019\",\"3-Jan-2019\",\"4-Jan-2019\",\"5-Jan-2019\"))\nhour_selected = st.sidebar.slider(\"Select hour of travelling\", 0, 23)\n\n# LAYING OUT THE TOP SECTION\n\nst.text(\n\"\"\"\n\nShowing the number of travelling started for Bangkok.\nBy sliding and selecting date.\n\n\"\"\")\n\n\ntimedisplay = \"Date Displayed : \" + date_select\nst.title(timedisplay)\n\n\n# LOADING DATA\nDATE_TIME = \"date/time\"\nDATA_DAY1 = (\"https://raw.githubusercontent.com/Maplub/odsample/master/20190101.csv\")\nDATA_DAY2 = (\"https://raw.githubusercontent.com/Maplub/odsample/master/20190102.csv\")\nDATA_DAY3= (\"https://raw.githubusercontent.com/Maplub/odsample/master/20190103.csv\")\nDATA_DAY4 = (\"https://raw.githubusercontent.com/Maplub/odsample/master/20190104.csv\")\nDATA_DAY5 = (\"https://raw.githubusercontent.com/Maplub/odsample/master/20190105.csv\")\n\n#SELECT DATA ACCORDING TO date_select\nif date_select == \"1-Jan-2019\" :\n DATA_URL = DATA_DAY1\nelif date_select == \"2-Jan-2019\" :\n DATA_URL = DATA_DAY2\nelif date_select == \"3-Jan-2019\" :\n DATA_URL = DATA_DAY3\nelif date_select == \"4-Jan-20199\" :\n DATA_URL = DATA_DAY4\nelif date_select == \"5-Jan-2019\" :\n DATA_URL = DATA_DAY5\n\[email protected](persist=True)\ndef load_data(nrows):\n data = pd.read_csv(DATA_URL, nrows=nrows)\n data = data[['timestart','latstartl','lonstartl']].copy()\n data = data.rename(columns = {'timestart': 'Date/Time', 'latstartl': 'Lat', 'lonstartl': 'Lon'}, inplace = False)\n lowercase = lambda x: str(x).lower()\n data.rename(lowercase, axis=\"columns\", inplace=True)\n data[DATE_TIME] = pd.to_datetime(data[DATE_TIME])\n return data\n\n\ndata = load_data(100000)\n\n##################################################################################\n##################################################################################\n# CREATING FUNCTION FOR MAPS\n\ndef map(data, lat, lon, zoom):\n st.write(pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state={\n \"latitude\": lat,\n \"longitude\": lon,\n \"zoom\": zoom,\n \"pitch\": 50,\n },\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=data,\n get_position=[\"lon\", \"lat\"],\n radius=100,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n ]\n ))\n \n##################################################################################\n##################################################################################\n# FILTERING DATA BY HOUR 
SELECTED\ndata = data[(data[DATE_TIME].dt.hour == hour_selected) & (data[DATE_TIME].dt.year == 2019)]\n\n\n# LAYING OUT THE MIDDLE SECTION OF THE APP WITH THE MAPS\n\n\n# SETTING THE ZOOM LOCATIONS\nzoom_level = 10\n\nmidpoint = [13.774646978946686, 100.62422280542049]\n\nmap(data, midpoint[0], midpoint[1], zoom_level)\nst.write(\"Diagram for route using at time selected\")\n\n\n# FILTERING DATA FOR THE HISTOGRAM\nfiltered = data[\n (data[DATE_TIME].dt.hour >= hour_selected) & (data[DATE_TIME].dt.hour < (hour_selected + 1))\n ]\n\nhist = np.histogram(filtered[DATE_TIME].dt.minute, bins=60, range=(0, 60))[0]\n\nchart_data = pd.DataFrame({\"minute\": range(60), \"Start travel location\": hist})\n\n# LAYING OUT THE HISTOGRAM SECTION\nst.write(\"\")\nst.altair_chart(alt.Chart(chart_data)\n .mark_area(\n interpolate='step-after',\n ).encode(\n x=alt.X(\"minute:Q\", scale=alt.Scale(nice=False)),\n y=alt.Y(\"Start travel location:Q\"),\n tooltip=['minute', 'Start travel location']\n ).configure_mark(\n opacity=0.5,\n color='red'\n ), use_container_width=True)"
] | [
[
"pandas.to_datetime",
"numpy.histogram",
"pandas.read_csv"
]
] |
danielt17/Reverse-cyclic-redundancy-check-CRC- | [
"78c907706534c36d632651a46ebf7a67446ffbb8"
] | [
"Utils.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 6 21:19:29 2022\r\n\r\n@author: DanielT17\r\n\"\"\"\r\n\r\n# %% Imports\r\n\r\n\r\nfrom collections import Counter\r\nfrom math import ceil\r\nimport numpy as np\r\n\r\n# %% Functions\r\n\r\ndef Swap(a,b):\r\n '''\r\n Description: \r\n This function swaps two variables.\r\n Inputs:\r\n a,b - ints.\r\n Outputs:\r\n c,d - ints.\r\n '''\r\n c = b\r\n d = a\r\n return c,d\r\n\r\ndef Bitstring_To_Bytes(s,endian='big'):\r\n '''\r\n Description: \r\n This function gets a bit string binary and turns it to bytes array.\r\n Inputs:\r\n s - string - binary string.\r\n endian - str - big or little endian representation\r\n Outputs:\r\n return - byte array of s.\r\n '''\r\n return int(s, 2).to_bytes((len(s) + 7) // 8, byteorder=endian)\r\n\r\ndef Bytearray_To_Int(s,endian=\"big\"):\r\n '''\r\n Description: \r\n This function turn a byte array into an int.\r\n Inputs:\r\n s - byte array.\r\n Outputs:\r\n returns - int.\r\n '''\r\n return int.from_bytes(s, endian)\r\n\r\ndef Int_To_Bytearray(s,endian=\"big\"):\r\n '''\r\n Description: \r\n This function turns an int into a bytearray.\r\n Inputs:\r\n s - int.\r\n Outputs:\r\n returns - byte array.\r\n '''\r\n return s.to_bytes(ceil(s.bit_length()/8),endian)\r\n\r\ndef Remove_Zeros_From_Binary_String(string):\r\n '''\r\n Description: \r\n This function removes preappended zeros to a binary string.\r\n Inputs:\r\n string - a string sequence of ones and zeros.\r\n Outputs:\r\n string - without preappended zeros.\r\n '''\r\n counter = 0\r\n for char in string:\r\n if char == '0':\r\n counter += 1\r\n else:\r\n break\r\n return string[counter:]\r\n\r\ndef Turn_Bitstring_To_Numpy_Array_Of_Bits(string,crc_width):\r\n '''\r\n Description:\r\n This function turns a bit string into a numpy array of size crc_width\r\n where each arr[i] is equal to string[i]. 
A binary vector in GF(2).\r\n Inputs:\r\n string - string - a binary string.\r\n crc_width - int - the crc polynomial width\r\n Outputs:\r\n arr - numpy array - vector version of the binary string in GF(2).\r\n '''\r\n arr = np.zeros((1,crc_width),dtype=np.uint8)\r\n for i in range(crc_width):\r\n arr[0,i] = int(string[i])\r\n return arr\r\n\r\ndef Turn_Numpy_Array_Of_Bits_To_Bitstring(arr,crc_width):\r\n '''\r\n Description:\r\n This function turns a numpy array of bits in GF(2) to a bit string.\r\n Inputs:\r\n arr - numpy array - a vector of bits in GF(2).\r\n crc_width - int - the crc polynomial width\r\n Outputs:\r\n string - string - a binary string.\r\n '''\r\n string = ''\r\n for i in range(crc_width):\r\n string += str(arr[i])\r\n return string\r\n\r\ndef Byte_Xor(ba1, ba2):\r\n \"\"\"\r\n Description:\r\n This function computes the xor between two byte arrays.\r\n Inputs:\r\n ba1, ba2 - byte arrays - are byte arrays of the same size to be xored.\r\n Outputs:\r\n xored - byte array - A byte array with the xored result.\r\n \"\"\"\r\n xored = bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])\r\n return xored\r\n\r\ndef Unique(ls,version=0):\r\n '''\r\n Description:\r\n This function find unique elemnts in a list, created because numpy\r\n unique functionality discards string when using unique.\r\n Inputs:\r\n ls - list - list to be uniqued.\r\n version - int - type of unique method.\r\n Outputs:\r\n unique_list - list - list of unique elements.\r\n '''\r\n if version == 0:\r\n unique_list = Counter(ls).keys()\r\n counts = Counter(ls).values()\r\n return list(unique_list),list(counts)\r\n elif version == 1:\r\n unique_list = []\r\n for x in ls:\r\n if x not in unique_list:\r\n unique_list.append(x)\r\n return unique_list\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.zeros"
]
] |
treeson-li/onsets_frames_transcription | [
"2cebfe738ea23258b3223094ab25d4e130ac2caf"
] | [
"onsets_frames_transcription_create_dataset_maps.py"
] | [
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create the tfrecord files necessary for training onsets and frames.\n\nThe training files are split in ~20 second chunks by default, the test files\nare not split.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport re\n\nimport split_audio_and_label_data\n\nfrom magenta.music import audio_io\nfrom magenta.music import midi_io\n\nimport tensorflow as tf\n\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('input_dir', None,\n 'Directory where the un-zipped MAPS files are.')\ntf.app.flags.DEFINE_string('output_dir', './',\n 'Directory where the two output TFRecord files '\n '(train and test) will be placed.')\ntf.app.flags.DEFINE_integer('min_length', 5, 'minimum segment length')\ntf.app.flags.DEFINE_integer('max_length', 20, 'maximum segment length')\ntf.app.flags.DEFINE_integer('sample_rate', 16000, 'desired sample rate')\n\ntest_dirs = ['ENSTDkCl/MUS', 'ENSTDkAm/MUS']\ntrain_dirs = [\n 'AkPnBcht/MUS', 'AkPnBsdf/MUS', 'AkPnCGdD/MUS', 'AkPnStgb/MUS',\n 'SptkBGAm/MUS', 'SptkBGCl/MUS', 'StbgTGd2/MUS'\n]\n\n\ndef filename_to_id(filename):\n \"\"\"Translate a .wav or .mid path to a MAPS sequence id.\"\"\"\n return re.match(r'.*MUS-(.*)_[^_]+\\.\\w{3}',\n os.path.basename(filename)).group(1)\n\n\ndef generate_train_set(exclude_ids):\n \"\"\"Generate the train TFRecord.\"\"\"\n train_file_pairs = []\n for directory in train_dirs:\n path = os.path.join(FLAGS.input_dir, directory)\n path = os.path.join(path, '*.wav')\n wav_files = glob.glob(path)\n # find matching mid files\n for wav_file in wav_files:\n base_name_root, _ = os.path.splitext(wav_file)\n mid_file = base_name_root + '.mid'\n if filename_to_id(wav_file) not in exclude_ids:\n train_file_pairs.append((wav_file, mid_file))\n\n train_output_name = os.path.join(FLAGS.output_dir,\n 'maps_config2_train.tfrecord')\n\n with tf.python_io.TFRecordWriter(train_output_name) as writer:\n for idx, pair in enumerate(train_file_pairs):\n print('{} of {}: {}'.format(idx, len(train_file_pairs), pair[0]))\n # load the wav data\n wav_data = tf.gfile.Open(pair[0], 'rb').read()\n # load the midi data and convert to a notesequence\n ns = midi_io.midi_file_to_note_sequence(pair[1])\n for example in split_audio_and_label_data.process_record(\n wav_data, ns, 
pair[0], FLAGS.min_length, FLAGS.max_length,\n FLAGS.sample_rate):\n writer.write(example.SerializeToString())\n\n\ndef generate_test_set():\n \"\"\"Generate the test TFRecord.\"\"\"\n test_file_pairs = []\n for directory in test_dirs:\n path = os.path.join(FLAGS.input_dir, directory)\n path = os.path.join(path, '*.wav')\n wav_files = glob.glob(path)\n # find matching mid files\n for wav_file in wav_files:\n base_name_root, _ = os.path.splitext(wav_file)\n mid_file = base_name_root + '.mid'\n test_file_pairs.append((wav_file, mid_file))\n\n test_output_name = os.path.join(FLAGS.output_dir,\n 'maps_config2_test.tfrecord')\n\n with tf.python_io.TFRecordWriter(test_output_name) as writer:\n for idx, pair in enumerate(test_file_pairs):\n print('{} of {}: {}'.format(idx, len(test_file_pairs), pair[0]))\n # load the wav data and resample it.\n samples = audio_io.load_audio(pair[0], FLAGS.sample_rate)\n wav_data = audio_io.samples_to_wav_data(samples, FLAGS.sample_rate)\n\n # load the midi data and convert to a notesequence\n ns = midi_io.midi_file_to_note_sequence(pair[1])\n\n example = split_audio_and_label_data.create_example(pair[0], ns, wav_data)\n writer.write(example.SerializeToString())\n\n return [filename_to_id(wav) for wav, _ in test_file_pairs]\n\n\ndef main(unused_argv):\n test_ids = generate_test_set()\n generate_train_set(test_ids)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n"
] | [
[
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.gfile.Open",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.app.run"
]
] |
Ambistic/CellTissue | [
"c7fce7bb9443a4dfc3b632d8f40aa598388f9d80"
] | [
"cbmos/solvers/adams_bashforth.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport numpy as np\nfrom scipy.integrate._ivp.ivp import OdeResult\nimport matplotlib.pyplot as plt\n\nplt.style.use('seaborn')\n\n\ndef solve_ivp(fun, t_span, y0, t_eval=None, dt=0.01):\n\n t0, tf = float(t_span[0]), float(t_span[-1])\n\n t = t0\n y = y0\n\n ts = [t]\n ys = [y]\n\n # start with 1 Euler forward step\n y = y + dt*fun(t,y)\n t = t + dt\n\n ts.append(t)\n ys.append(y)\n\n while t < tf:\n\n # take minimum of dt and tf-t in order to not overstep\n dt = np.minimum(dt, tf-t)\n\n y = y + dt/2.0*(3*fun(t,y)-fun(ts[-2], ys[-2]))\n t = t + dt\n\n ts.append(t)\n ys.append(y)\n\n ts = np.hstack(ts)\n ys = np.vstack(ys).T\n\n return OdeResult(t=ts, y=ys)\n\nif __name__ == \"__main__\":\n\n # stability region for Euler forward for this problem is h<2/50=0.04\n @np.vectorize\n def func(t,y):\n return -50*y\n\n t_span = (0,1)\n y0 = 1\n\n sol = solve_ivp(func, t_span, y0 )\n\n plt.figure()\n plt.plot(sol.t, sol.y.T)\n\n\n\n"
] | [
[
"scipy.integrate._ivp.ivp.OdeResult",
"numpy.minimum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.use",
"numpy.hstack",
"numpy.vstack"
]
] |
nigma/pywt | [
"1b5ed1d5d2cc355fd0e7af591826d1be40522cee"
] | [
"demo/dwt_multidim.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pprint\n\nimport numpy\n\nimport pywt\n\ndata = numpy.ones((4, 4, 4, 4)) # 4D array\nresult = pywt.dwtn(data, 'db1') # sixteen 4D coefficient arrays\npprint.pprint(result)\n"
] | [
[
"numpy.ones"
]
] |
zjdcts/H-FC | [
"60a00322d77ae07519174a3eb0b02270aa8578c1"
] | [
"nnunet/training/network_training/BTS_Netv4_6_TrainerV2.py"
] | [
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections import OrderedDict\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom nnunet.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation\nfrom nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2\nfrom nnunet.utilities.to_torch import maybe_to_torch, to_cuda\nfrom nnunet.network_architecture.generic_BTS_Netv4_6 import Generic_BTS_Netv4_6\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.network_architecture.neural_network import SegmentationNetwork\nfrom nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \\\n get_patch_size, default_3D_augmentation_params\nfrom nnunet.training.dataloading.dataset_loading import unpack_dataset\nfrom nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom sklearn.model_selection import KFold\nfrom torch import nn\nfrom torch.cuda.amp import autocast\nfrom nnunet.training.learning_rate.poly_lr import poly_lr\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\n\nclass BTS_Netv4_6_TrainerV2(nnUNetTrainer):\n \"\"\"\n Info for Fabian: same as internal nnUNetTrainerV2_2\n \"\"\"\n\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False):\n super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,\n deterministic, fp16)\n self.max_num_epochs = 1000\n self.initial_lr = 3e-4\n self.deep_supervision_scales = None\n self.ds_loss_weights = None\n\n self.pin_memory = True\n\n def initialize(self, training=True, force_load_plans=False):\n \"\"\"\n - replaced get_default_augmentation with get_moreDA_augmentation\n - enforce to only run this code once\n - loss function wrapper for deep supervision\n\n :param training:\n :param force_load_plans:\n :return:\n \"\"\"\n if not self.was_initialized:\n maybe_mkdir_p(self.output_folder)\n\n if force_load_plans or (self.plans is None):\n self.load_plans_file()\n\n self.process_plans(self.plans)\n\n self.setup_DA_params()\n\n ################# Here we wrap the loss for deep supervision ############\n # we need to know the number of outputs of the network\n net_numpool = len(self.net_num_pool_op_kernel_sizes)\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(net_numpool)])\n\n # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1\n mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])\n weights[~mask] = 0\n weights = weights / weights.sum()\n self.ds_loss_weights = weights\n self.ds_loss_weights = np.append(self.ds_loss_weights, np.array([0.5]), axis=0)\n # now wrap the loss\n self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)\n ################# END ###################\n\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % self.stage)\n if training:\n self.dl_tr, self.dl_val = self.get_basic_generators()\n if self.unpack_data:\n print(\"unpacking dataset\")\n unpack_dataset(self.folder_with_preprocessed_data)\n print(\"done\")\n else:\n print(\n \"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you \"\n \"will wait all winter for your model to finish!\")\n\n self.tr_gen, self.val_gen = get_moreDA_augmentation(\n self.dl_tr, self.dl_val,\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params,\n deep_supervision_scales=self.deep_supervision_scales,\n pin_memory=self.pin_memory,\n use_nondetMultiThreadedAugmenter=False\n )\n self.print_to_log_file(\"TRAINING KEYS:\\n %s\" % (str(self.dataset_tr.keys())),\n also_print_to_console=False)\n self.print_to_log_file(\"VALIDATION KEYS:\\n %s\" % (str(self.dataset_val.keys())),\n also_print_to_console=False)\n else:\n pass\n\n self.initialize_network()\n self.initialize_optimizer_and_scheduler()\n\n assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))\n else:\n self.print_to_log_file('self.was_initialized is True, not running self.initialize again')\n self.was_initialized = True\n\n def initialize_network(self):\n \"\"\"\n - momentum 0.99\n - SGD instead of Adam\n - self.lr_scheduler = None because we do poly_lr\n - deep supervision = True\n - i am sure I forgot something here\n\n Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though\n :return:\n \"\"\"\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n self.network = Generic_BTS_Netv4_6(self.num_input_channels, self.base_num_features, self.num_classes,\n len(self.net_num_pool_op_kernel_sizes),\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, True, False, lambda x: x,\n InitWeights_He(1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True,\n True)\n if torch.cuda.is_available():\n self.network.cuda()\n self.network.inference_apply_nonlin = softmax_helper\n\n def initialize_optimizer_and_scheduler(self):\n assert self.network is not None, \"self.initialize_network must be called first\"\n # self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n # momentum=0.99, nesterov=True)\n self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay)\n self.lr_scheduler = None\n\n def run_online_evaluation(self, output, target):\n \"\"\"\n due to deep supervision the return value and the reference are now lists of tensors. 
We only need the full\n resolution output because this is what we are interested in in the end. The others are ignored\n :param output:\n :param target:\n :return:\n \"\"\"\n target = target[0]\n output = output[0]\n return super().run_online_evaluation(output, target)\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,\n step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n \"\"\"\n We need to wrap this because we need to enforce self.network.do_ds = False for prediction\n \"\"\"\n ds = self.network.do_ds\n self.network.do_ds = False\n ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,\n save_softmax=save_softmax, use_gaussian=use_gaussian,\n overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,\n all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs,\n run_postprocessing_on_folds=run_postprocessing_on_folds)\n\n self.network.do_ds = ds\n return ret\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision=True) -> Tuple[\n np.ndarray, np.ndarray]:\n \"\"\"\n We need to wrap this because we need to enforce self.network.do_ds = False for prediction\n \"\"\"\n ds = self.network.do_ds\n self.network.do_ds = False\n ret = super().predict_preprocessed_data_return_seg_and_softmax(data,\n do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size, use_gaussian=use_gaussian,\n pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,\n verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.do_ds = ds\n return ret\n\n def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):\n \"\"\"\n gradient clipping improves training stability\n\n :param data_generator:\n :param do_backprop:\n :param run_online_evaluation:\n :return:\n \"\"\"\n data_dict = next(data_generator)\n data = data_dict['data']\n target = data_dict['target']\n # x = target[0].clone().detach()\n # target.append(x)\n\n data = maybe_to_torch(data)\n target = maybe_to_torch(target)\n\n if torch.cuda.is_available():\n data = to_cuda(data)\n target = to_cuda(target)\n\n self.optimizer.zero_grad()\n\n if self.fp16:\n with autocast():\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n if do_backprop:\n self.amp_grad_scaler.scale(l).backward()\n self.amp_grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.amp_grad_scaler.step(self.optimizer)\n self.amp_grad_scaler.update()\n else:\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n if do_backprop:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n\n if run_online_evaluation:\n self.run_online_evaluation(output, target)\n\n del target\n\n return l.detach().cpu().numpy()\n\n def do_split(self):\n \"\"\"\n The default split is a 5 fold CV on all available 
training cases. nnU-Net will create a split (it is seeded,\n so always the same) and save it as splits_final.pkl file in the preprocessed data directory.\n Sometimes you may want to create your own split for various reasons. For this you will need to create your own\n splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in\n it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)\n and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to\n use a random 80:20 data split.\n :return:\n \"\"\"\n if self.fold == \"all\":\n # if fold==all then we use all images for training and validation\n tr_keys = val_keys = list(self.dataset.keys())\n else:\n splits_file = join(self.dataset_directory, \"splits_final.pkl\")\n\n # if the split file does not exist we need to create it\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new 5-fold cross-validation split...\")\n splits = []\n all_keys_sorted = np.sort(list(self.dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append(OrderedDict())\n splits[-1]['train'] = train_keys\n splits[-1]['val'] = test_keys\n save_pickle(splits, splits_file)\n\n else:\n self.print_to_log_file(\"Using splits from existing split file:\", splits_file)\n splits = load_pickle(splits_file)\n self.print_to_log_file(\"The split file contains %d splits.\" % len(splits))\n\n self.print_to_log_file(\"Desired fold for training: %d\" % self.fold)\n if self.fold < len(splits):\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n self.print_to_log_file(\"This split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n else:\n self.print_to_log_file(\"INFO: You requested fold %d for training but splits \"\n \"contain only %d folds. I am now creating a \"\n \"random (but seeded) 80:20 split!\" % (self.fold, len(splits)))\n # if we request a fold that is not in the split file, create a random 80:20 split\n rnd = np.random.RandomState(seed=12345 + self.fold)\n keys = np.sort(list(self.dataset.keys()))\n idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)\n idx_val = [i for i in range(len(keys)) if i not in idx_tr]\n tr_keys = [keys[i] for i in idx_tr]\n val_keys = [keys[i] for i in idx_val]\n self.print_to_log_file(\"This random 80:20 split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n\n tr_keys.sort()\n val_keys.sort()\n self.dataset_tr = OrderedDict()\n for i in tr_keys:\n self.dataset_tr[i] = self.dataset[i]\n self.dataset_val = OrderedDict()\n for i in val_keys:\n self.dataset_val[i] = self.dataset[i]\n\n def setup_DA_params(self):\n \"\"\"\n - we increase roation angle from [-15, 15] to [-30, 30]\n - scale range is now (0.7, 1.4), was (0.85, 1.25)\n - we don't do elastic deformation anymore\n\n :return:\n \"\"\"\n\n self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(\n np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]\n\n if self.threeD:\n self.data_aug_params = default_3D_augmentation_params\n self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)\n self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi)\n self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)\n if self.do_dummy_2D_aug:\n self.data_aug_params[\"dummy_2D\"] = True\n self.print_to_log_file(\"Using dummy2d data augmentation\")\n self.data_aug_params[\"elastic_deform_alpha\"] = \\\n default_2D_augmentation_params[\"elastic_deform_alpha\"]\n self.data_aug_params[\"elastic_deform_sigma\"] = \\\n default_2D_augmentation_params[\"elastic_deform_sigma\"]\n self.data_aug_params[\"rotation_x\"] = default_2D_augmentation_params[\"rotation_x\"]\n else:\n self.do_dummy_2D_aug = False\n if max(self.patch_size) / min(self.patch_size) > 1.5:\n default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)\n self.data_aug_params = default_2D_augmentation_params\n self.data_aug_params[\"mask_was_used_for_normalization\"] = self.use_mask_for_norm\n\n if self.do_dummy_2D_aug:\n self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],\n self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))\n patch_size_for_spatialtransform = self.patch_size[1:]\n else:\n self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n patch_size_for_spatialtransform = self.patch_size\n\n self.data_aug_params[\"scale_range\"] = (0.7, 1.4)\n self.data_aug_params[\"do_elastic\"] = False\n self.data_aug_params['selected_seg_channels'] = [0]\n self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform\n\n self.data_aug_params[\"num_cached_per_thread\"] = 2\n\n def maybe_update_lr(self, epoch=None):\n \"\"\"\n if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1\n\n (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.\n herefore we need to do +1 here)\n\n :param epoch:\n :return:\n \"\"\"\n if epoch is None:\n ep = self.epoch + 1\n else:\n ep = epoch\n self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)\n self.print_to_log_file(\"lr:\", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))\n\n def on_epoch_end(self):\n \"\"\"\n overwrite patient-based early stopping. Always run to 1000 epochs\n :return:\n \"\"\"\n super().on_epoch_end()\n continue_training = self.epoch < self.max_num_epochs\n\n # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the\n # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95\n if self.epoch == 100:\n if self.all_val_eval_metrics[-1] == 0:\n self.optimizer.param_groups[0][\"momentum\"] = 0.95\n self.network.apply(InitWeights_He(1e-2))\n self.print_to_log_file(\"At epoch 100, the mean foreground Dice was 0. This can be caused by a too \"\n \"high momentum. High momentum (0.99) is good for datasets where it works, but \"\n \"sometimes causes issues such as this one. 
Momentum has now been reduced to \"\n \"0.95 and network weights have been reinitialized\")\n return continue_training\n\n def run_training(self):\n \"\"\"\n if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first\n continued epoch with self.initial_lr\n\n we also need to make sure deep supervision in the network is enabled for training, thus the wrapper\n :return:\n \"\"\"\n self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we\n # want at the start of the training\n ds = self.network.do_ds\n self.network.do_ds = True\n ret = super().run_training()\n self.network.do_ds = ds\n return ret\n"
] | [
[
"numpy.array",
"torch.cuda.amp.autocast",
"numpy.random.RandomState",
"numpy.round",
"torch.cuda.is_available",
"sklearn.model_selection.KFold",
"numpy.vstack"
]
] |
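The `do_split` logic in the trainer row above recreates nnU-Net's seeded 5-fold split whenever no `splits_final.pkl` exists. As a rough illustration (not nnU-Net's own code), the sketch below builds an equivalent `splits_final.pkl` by hand; plain `pickle` stands in for nnU-Net's `save_pickle` helper and the case identifiers are hypothetical.

```python
import pickle
from collections import OrderedDict

import numpy as np
from sklearn.model_selection import KFold

case_ids = ["case_%03d" % i for i in range(20)]  # hypothetical dataset keys

all_keys_sorted = np.sort(case_ids)
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)  # same seed as the trainer

splits = []
for train_idx, test_idx in kfold.split(all_keys_sorted):
    fold = OrderedDict()
    fold["train"] = all_keys_sorted[train_idx]  # arrays of case identifiers
    fold["val"] = all_keys_sorted[test_idx]
    splits.append(fold)

# plain pickle stands in for nnU-Net's save_pickle; place the file in the
# preprocessed data directory so the trainer uses it instead of re-splitting
with open("splits_final.pkl", "wb") as f:
    pickle.dump(splits, f)
```

With fewer than five entries in this list, requesting a higher fold index falls back to the seeded random 80:20 split described in the docstring above.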
kaitlyndlee/plio | [
"99f0852d8eb92efeba72f366077bd023a7da7cdd"
] | [
"plio/io/io_jsc.py"
] | [
"import os\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.common import array_equivalent\n\nfrom plio.utils.utils import file_search\n\n\n# This function reads the lookup tables used to expand metadata from the file names\n# This is separated from parsing the filenames so that for large lists of files the\n# lookup tables don't need to be read over and over\n#\n# Info in the tables is stored in a dict of dataframes so that only one variable\n# (the dict) needs to be passed between functions\ndef read_refdata(LUT_files):\n ID_info = pd.read_csv(LUT_files['ID'], index_col=0)\n spectrometer_info = pd.read_csv(LUT_files['spect'], index_col=0)\n # spectrometer_info.reset_index(inplace=True)\n laser_info = pd.read_csv(LUT_files['laser'], index_col=0)\n # laser_info.reset_index(inplace=True)\n exp_info = pd.read_csv(LUT_files['exp'], index_col=0)\n # exp_info.reset_index(inplace=True)\n sample_info = pd.read_csv(LUT_files['sample'], index_col=0)\n # sample_info.reset_index(inplace=True)\n refdata = {'spect': spectrometer_info, 'laser': laser_info, 'exp': exp_info, 'sample': sample_info, 'ID': ID_info}\n return refdata\n\n\n# This function parses the file names to record metadata related to the observation\ndef jsc_filename_parse(filename, refdata):\n filename = os.path.basename(filename) # strip the path off of the file name\n filename = filename.split('_') # split the file name on underscores\n libs_ID = filename[0]\n laserID = filename[4][0]\n expID = filename[5]\n spectID = filename[6]\n\n try:\n sampleID = refdata['ID'].loc[libs_ID].values[0]\n file_info = pd.DataFrame(refdata['sample'].loc[sampleID])\n if file_info.columns.shape[0] < file_info.index.shape[0]:\n file_info = file_info.T\n if file_info.index.shape[0] > 1:\n print('More than one matching row for ' + sampleID + '!')\n tempID = 'Unknown'\n file_info = pd.DataFrame(refdata['sample'].loc[tempID])\n if file_info.columns.shape[0] < file_info.index.shape[0]:\n file_info = file_info.T\n\n\n except:\n sampleID = 'Unknown'\n file_info = pd.DataFrame(refdata['sample'].loc[sampleID])\n if file_info.columns.shape[0] < file_info.index.shape[0]:\n file_info = file_info.T\n\n file_info['Sample ID'] = sampleID\n file_info['LIBS ID'] = libs_ID\n file_info.reset_index(level=0, inplace=True, drop=True)\n file_info['loc'] = int(filename[1])\n file_info['lab'] = filename[2]\n file_info['gas'] = filename[3][0]\n file_info['pressure'] = float(filename[3][1:])\n\n if laserID in refdata['laser'].index:\n laser_info = pd.DataFrame(refdata['laser'].loc[laserID]).T\n laser_info.index.name = 'Laser Identifier'\n laser_info.reset_index(level=0, inplace=True)\n file_info = pd.concat([file_info, laser_info], axis=1)\n\n file_info['laser_power'] = float(filename[4][1:])\n if expID in refdata['exp'].index:\n exp_info = pd.DataFrame(refdata['exp'].loc[expID]).T\n exp_info.index.name = 'Exp Identifier'\n exp_info.reset_index(level=0, inplace=True)\n file_info = pd.concat([file_info, exp_info], axis=1)\n\n file_info['spectrometer'] = spectID\n if spectID in refdata['spect'].index:\n temp = refdata['spect'].loc[spectID]\n temp = [temp[2], temp[4:]]\n spect_info = pd.DataFrame(refdata['spect'].loc[spectID]).T\n spect_info.index.name = 'Spectrometer Identifier'\n spect_info.reset_index(level=0, inplace=True)\n file_info = pd.concat([file_info, spect_info], axis=1)\n\n return file_info\n\n\ndef JSC(input_files, refdata):\n try:\n # read the first file\n data = pd.read_csv(input_files[0], skiprows=14, sep='\\t', engine='c')\n data = 
data.rename(columns={data.columns[0]: 'time1', data.columns[1]: 'time2'})\n metadata = pd.concat([jsc_filename_parse(input_files[0], refdata)] * len(data.index))\n metadata.drop('spectrometer', axis=1, inplace=True)\n\n # read the next files and merge them with the first\n for file in input_files[1:]:\n datatemp = pd.read_csv(file, skiprows=14, sep='\\t', engine='c')\n datatemp = datatemp.rename(columns={datatemp.columns[0]: 'time1', datatemp.columns[1]: 'time2'})\n data = data.merge(datatemp)\n\n time = data[['time1', 'time2']] # split the two time columns from the data frame\n data.drop(['time1', 'time2'], axis=1, inplace=True) # trim the data frame so it is just the spectra\n\n # make a multiindex for each wavlength column so they can be easily isolated from metadata later\n data.columns = [['wvl'] * len(data.columns), np.array(data.columns.values, dtype='float').round(4)]\n\n metadata.index = data.index\n metadata = pd.concat([metadata, time], axis=1)\n compcols = ['SiO2', 'TiO2', 'Al2O3', 'Cr2O3', 'Fe2O3T', 'MnO', 'MgO', 'CaO', 'Na2O', 'K2O', 'P2O5',\n 'SO3 LOI Residue', 'Total', 'Total Includes', '%LOI', 'FeO',\n 'Fe2O3', 'SO3 Actual', 'Fe(3+)/Fe(Total)', 'Rb (ug/g)', 'Sr (ug/g)', 'Y (ug/g)', 'Zr (ug/g)',\n 'V (ug/g)', 'Ni (ug/g)', 'Cr (ug/g)',\n 'Nb (ug/g)', 'Ga (ug/g)', 'Cu (ug/g)', 'Zn (ug/g)', 'Co (ug/g)', 'Ba (ug/g)', 'La (ug/g)',\n 'Ce (ug/g)', 'U (ug/g)', 'Th (ug/g)', 'Sc (ug/g)',\n 'Pb (ug/g)', 'Ge (ug/g)', 'As (ug/g)', 'Cl (ug/g)']\n compdata = metadata[compcols]\n metadata.drop(compcols, axis=1, inplace=True)\n metadata.columns = [['meta'] * len(metadata.columns), metadata.columns.values]\n compdata.columns = [['comp'] * len(compdata.columns), compdata.columns.values]\n data = pd.concat([data, metadata, compdata], axis=1)\n\n data[('meta', 'Scan #')] = data.index\n data.set_index(('meta', 'time2'), drop=False, inplace=True)\n\n return data\n except:\n print('Problem reading:' + input_file)\n print('Moving to Problem_Files')\n os.rename(input_file,\n r\"C:\\\\Users\\\\rbanderson\\\\Documents\\\\Projects\\\\LIBS PDART\\\\Database\\\\LIBS USGS\\\\Problem_Files\\\\\" + os.path.basename(\n input_file))\n return None\n\n\ndef jsc_batch(directory, LUT_files, searchstring='*.txt', to_csv=None):\n # Read in the lookup tables to expand filename metadata\n refdata = read_refdata(LUT_files)\n # get the list of files that match the search string in the given directory\n filelist = file_search(directory, searchstring)\n spectIDs = [] # create an empty list to hold the spectrometer IDs\n libsIDs = []\n timestamps = []\n locs = []\n for file in filelist:\n filesplit = os.path.basename(file).split('_')\n spectIDs.append(filesplit[6]) # get the spectrometer IDs for each file in the list\n libsIDs.append(filesplit[0])\n timestamps.append(filesplit[-1].split('.')[0])\n locs.append(filesplit[1])\n spectIDs_unique = np.unique(spectIDs) # get the unique spectrometer IDs\n libsIDs_unique = np.unique(libsIDs)\n dfs = [] # create an empty list to hold the data frames for each spectrometer\n\n # loop through each LIBS ID\n alldata = []\n for ID in libsIDs_unique:\n print('Working on : ' + str(ID))\n sublist = filelist[np.in1d(libsIDs, ID)]\n locs = []\n for file in sublist:\n locs.append(os.path.basename(file).split('_')[1])\n locs_unique = np.unique(locs)\n # loop through each location for that libs ID\n for loc in locs_unique:\n print(loc)\n sub_sublist = sublist[np.in1d(locs, loc)] # get the files for that LIBSID and location\n data = JSC(sub_sublist, refdata)\n alldata.append(data)\n pass\n\n 
combined = pd.concat(alldata)\n if to_csv is not None:\n print('Writing combined data to: ' + to_csv)\n combined.to_csv(to_csv)\n return combined\n\n\n# got this function from stack overflow: http://stackoverflow.com/questions/14984119/python-pandas-remove-duplicate-columns\n# it's slow but doesn't crash python like combined.T.drop_duplicates().T does in some cases with very large sets of data\ndef duplicate_columns(frame):\n groups = frame.columns.to_series().groupby(frame.dtypes).groups\n dups = []\n\n for t, v in groups.items():\n\n cs = frame[v].columns\n vs = frame[v]\n lcs = len(cs)\n\n for i in range(lcs):\n ia = vs.iloc[:, i].values\n for j in range(i + 1, lcs):\n ja = vs.iloc[:, j].values\n if array_equivalent(ia, ja):\n dups.append(cs[i])\n break\n\n return dups\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"pandas.core.common.array_equivalent",
"pandas.concat",
"numpy.in1d",
"pandas.read_csv",
"numpy.unique"
]
] |
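The `duplicate_columns` helper in `io_jsc.py` above groups columns by dtype and then compares candidate pairs element-wise. Below is a minimal standalone sketch of the same idea; `Series.equals` is used in place of the internal `pandas.core.common.array_equivalent` (an assumed substitution — both treat NaNs in matching positions as equal, but they are not the same API).

```python
import pandas as pd


def duplicate_columns(frame: pd.DataFrame) -> list:
    """Return labels of columns whose values duplicate a later column."""
    dups = []
    # group column labels by dtype so only same-typed columns are compared
    groups = frame.columns.to_series().groupby(frame.dtypes).groups
    for _, cols in groups.items():
        cols = list(cols)
        for i in range(len(cols)):
            for j in range(i + 1, len(cols)):
                # Series.equals treats NaN == NaN, like array_equivalent did
                if frame[cols[i]].equals(frame[cols[j]]):
                    dups.append(cols[i])
                    break
    return dups


df = pd.DataFrame({"a": [1, 2], "b": [1, 2], "c": ["x", "y"]})
print(duplicate_columns(df))  # ['a']
```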
XinYao1994/HOPE | [
"99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68"
] | [
"plot/btree/point/lookuplat_mem_email_btree.py"
] | [
"import sys\nimport os\nsys.path.append(os.path.abspath('./plot/'))\nfrom option import *\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plot\nimport matplotlib.ticker as ticker\nimport matplotlib.cm as cm\nimport numpy as np\n\nimport csv\n\nNAMES = [\"Uncompressed\", \"Single\", \"Double\", \"3-Grams, 65536\", \"4-Grams, 65536\", \"ALM-Improved, 8192\", \"ALM-Improved, 65536\"]\nLABELS = [\"Uncompressed\", \"Single\", \"Double\", \"3-Gram\", \"4-Grams\", \"ALM-Improved (4K)\", \"ALM-Improved (64K)\"]\n\nCOLORS = ['#ffffff', '#fff7ec', '#fee8c8', '#fc8d59', '#d7301f', '#7f0000', '#4c0000']\n\nBACKCOLORS = ['#fff7fb', '#ece7f2', '#d0d1e6', '#a6bddb', '#74a9cf', '#3690c0', '#0570b0', '#045a8d', '#023858']\n\nX_LABEL = \"Latency (us)\"\nY_LABEL = \"Memory(MB)\"\n\nX_LIMIT = 3.5\nY_LIMIT = 2400\n\nLEGEND_FONT_SIZE = 10\nLEGEND_POS = 'upper left'\n\nCSV_X_FILE_PATH = \"results/btree/point/final_lookuplat_email_btree.csv\"\nCSV_Y_FILE_PATH = \"results/btree/point/final_mem_email_btree.csv\"\nGRAPH_OUTPUT_PATH = \"figures/btree/point/lat_mem_email_btree_point.pdf\"\n\nf_in_x = open(CSV_X_FILE_PATH)\nreader = csv.reader(f_in_x)\ncsvrows = list(reader)\ndata_x = []\nfor row in csvrows :\n for item in row :\n data_x.append(float(item))\n\nf_in_y = open(CSV_Y_FILE_PATH)\nreader = csv.reader(f_in_y)\ncsvrows = list(reader)\ndata_y = []\nfor row in csvrows :\n for item in row :\n data_y.append(float(item))\n\n#========================================================================================\nmpl.rcParams['ps.useafm'] = True\nmpl.rcParams['pdf.use14corefonts'] = True\nmpl.rcParams['text.usetex'] = False\n\nmpl.rcParams['text.latex.preamble'] = [\n r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n r'\\usepackage{helvet}', # set the normal font here\n r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n r'\\sansmath' # <- tricky! 
-- gotta actually tell tex to use!\n]\n#========================================================================================\n\nfig = plot.figure(figsize=(GRAPH_WIDTH,GRAPH_HEIGHT))\nax = fig.add_subplot(111)\n\nproduct_max = X_LIMIT * Y_LIMIT * 0.9\nproduct_min = 3\nproduct_diff = product_max - product_min\n\nx_array = []\nx = 0.015\nwhile x < X_LIMIT :\n x_array.append(x)\n x += 0.015\nx_list = np.array(x_array)\n\ny_lists = []\nnum_ranges = 9\nfor i in range(0, num_ranges) :\n y_list = (product_min + (product_diff + 0.0) / num_ranges * i) / x_list\n y_lists.append(y_list)\n\nax.fill_between(x_list, 0, y_lists[0], facecolor=BACKCOLORS[0], edgecolor=BACKCOLORS[0])\nfor i in range(0, len(y_lists)-1) :\n ax.fill_between(x_list, y_lists[i], y_lists[i+1], facecolor=BACKCOLORS[i+1], edgecolor=BACKCOLORS[i+1])\n\nax.scatter(data_x[0], data_y[0], s=MARKER_SIZE, c=COLORS[0], marker=MARKERS[0], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[0], hatch=HATCH*2)\nax.scatter(data_x[1], data_y[1], s=MARKER_SIZE, c=COLORS[1], marker=MARKERS[1], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[1])\nax.scatter(data_x[2], data_y[2], s=MARKER_SIZE, c=COLORS[2], marker=MARKERS[2], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[2])\nax.scatter(data_x[3], data_y[3], s=MARKER_SIZE, c=COLORS[3], marker=MARKERS[3], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[3])\nax.scatter(data_x[4], data_y[4], s=MARKER_SIZE, c=COLORS[4], marker=MARKERS[4], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[4])\nax.scatter(data_x[5], data_y[5], s=MARKER_SIZE, c=COLORS[5], marker=MARKERS[5], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[5])\nax.scatter(data_x[6], data_y[6], s=MARKER_SIZE, c=COLORS[6], marker=MARKERS[6], linewidths = BORDER_SIZE, edgecolors = BORDER_COLOR, label=NAMES[6])\n\nax.set_xlabel(X_LABEL, fontsize=X_LABEL_FONT_SIZE)\nax.set_xlim(0, X_LIMIT)\n\nax.set_ylabel(Y_LABEL, fontsize=Y_LABEL_FONT_SIZE)\nax.set_ylim(0, Y_LIMIT)\n\nx_ticks = [0.5, 1, 1.5, 2, 2.5, 3, 3.5]\nax.set_xticks(x_ticks)\nax.tick_params(axis='x', labelsize=X_TICK_FONT_SIZE)\n\ny_ticks = [0, 400, 800, 1200, 1600, 2000, 2400]\nax.set_yticks(y_ticks)\nax.tick_params(axis='y', labelsize=Y_TICK_FONT_SIZE)\n\n#ax.grid()\n\nax.annotate(LABELS[0], (data_x[0], data_y[0] * 1.07), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[1], (data_x[1] * 0.94, data_y[1] * 1.08), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[2], (data_x[2] * 0.82, data_y[2]), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[3], (data_x[3] * 0.92, data_y[3] * 0.9), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[4], (data_x[4] * 1.1, data_y[4] * 0.9), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[5], (data_x[5] * 1.07, data_y[5] * 1.07), ha='center', va='center', size=ANNOTATOR_SIZE)\nax.annotate(LABELS[6], (data_x[6] * 1.07, data_y[6] * 0.9), ha='center', va='center', size=ANNOTATOR_SIZE)\n\nplot.savefig(GRAPH_OUTPUT_PATH, bbox_inches='tight')\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
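The plotting script above shades the background of the latency/memory scatter with bands of constant latency × memory product by filling between hyperbolas y = c/x. Here is a stripped-down sketch of just that trick, with placeholder colors and the limits hard-coded in place of the constants imported from `option.py`.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, as in the script above
import matplotlib.pyplot as plt
import numpy as np

X_LIMIT, Y_LIMIT = 3.5, 2400
x = np.arange(0.015, X_LIMIT, 0.015)

product_min, product_max = 3, X_LIMIT * Y_LIMIT * 0.9
num_ranges = 9
# each curve is y = c/x for an evenly spaced product value c
bands = [(product_min + (product_max - product_min) / num_ranges * i) / x
         for i in range(num_ranges)]

fig, ax = plt.subplots()
ax.fill_between(x, 0, bands[0], color="#fff7fb")
for lo, hi in zip(bands[:-1], bands[1:]):
    ax.fill_between(x, lo, hi, alpha=0.3)  # one band per product range
ax.set_xlim(0, X_LIMIT)
ax.set_ylim(0, Y_LIMIT)
fig.savefig("bands.pdf", bbox_inches="tight")
```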
sharkweek/lasso-python | [
"727957a8b9cf04240b25d18ab730153b9ab8db24"
] | [
"lasso/plotting/plotting.py"
] | [
"\n\nimport os\nimport io\nimport uuid\nimport json\nimport numpy as np\nfrom base64 import b64encode\nfrom zipfile import ZipFile, ZIP_DEFLATED\nfrom typing import Union, Tuple\n\n\ndef _read_file(filepath: str):\n '''This function reads file as str\n\n Parameters\n ----------\n filename : str\n filepath of the file to read as string\n\n Returns\n -------\n file_content : str\n '''\n\n with open(filepath, \"r\") as fp:\n return fp.read()\n\n\ndef plot_shell_mesh(node_coordinates: np.ndarray,\n shell_node_indexes: np.ndarray,\n field: Union[np.ndarray, None] = None,\n is_element_field: bool = True,\n fringe_limits: Union[Tuple[float, float], None] = None):\n ''' Plot a mesh\n\n Parameters\n ----------\n node_coordinates : np.ndarray\n array of node coordinates for elements\n shell_node_indexes : np.ndarray\n node indexes of shells\n field : Union[np.ndarray, None]\n Array containing a field value for every element or node\n is_element_field : bool\n if the specified field is for elements or nodes\n fringe_limits : Union[Tuple[float, float], None]\n limits for the fringe bar. Set by default to min and max.\n\n Returns\n -------\n html : str\n html code for plotting as string\n '''\n\n assert(node_coordinates.ndim == 2)\n assert(node_coordinates.shape[1] == 3)\n assert(shell_node_indexes.ndim == 2)\n assert(shell_node_indexes.shape[1] in [3, 4])\n if isinstance(field, np.ndarray):\n assert(field.ndim == 1)\n if is_element_field:\n assert(field.shape[0] == shell_node_indexes.shape[0])\n else:\n assert(field.shape[0] == node_coordinates.shape[0])\n\n # cast types correctly\n # the types MUST be float32\n node_coordinates = node_coordinates.astype(np.float32)\n if isinstance(field, np.ndarray):\n field = field.astype(np.float32)\n\n # distinguish tria and quads\n is_quad = shell_node_indexes[:, 2] != shell_node_indexes[:, 3]\n is_tria = np.logical_not(is_quad)\n\n # seperate tria and quads ... I know its sad :(\n tria_node_indexes = shell_node_indexes[is_tria][:, :3]\n quad_node_indexes = shell_node_indexes[is_quad]\n\n # we can only plot tria, therefore we need to split quads\n # into two trias\n quad_node_indexes_tria1 = quad_node_indexes[:, :3]\n # quad_node_indexes_tria2 = quad_node_indexes[:, [True, False, True, True]]\n quad_node_indexes_tria2 = quad_node_indexes[:, [0, 2, 3]]\n\n # assemble elements for plotting\n # This seems to take a lot of memory and you are right thinking this,\n # the issue is just in order to plot fringe values, we need to output\n # the element values at the 3 corner nodes. 
Since elements share nodes\n # we can not use the same nodes, thus we need to create multiple nodes\n # at the same position but with different fringe.\n nodes_xyz = np.concatenate([\n node_coordinates[tria_node_indexes].reshape((-1, 3)),\n node_coordinates[quad_node_indexes_tria1].reshape((-1, 3)),\n node_coordinates[quad_node_indexes_tria2].reshape((-1, 3))\n ])\n\n # fringe value and hover title\n if isinstance(field, np.ndarray):\n\n if is_element_field:\n n_shells = len(shell_node_indexes)\n n_tria = np.sum(is_tria)\n n_quads = n_shells - n_tria\n\n # split field according to elements\n field_tria = field[is_tria]\n field_quad = field[is_quad]\n\n # allocate fringe array\n node_fringe = np.zeros(\n (len(field_tria) + 2 * len(field_quad), 3), dtype=np.float32)\n\n # set fringe values\n node_fringe[:n_tria, 0] = field_tria\n node_fringe[:n_tria, 1] = field_tria\n node_fringe[:n_tria, 2] = field_tria\n\n node_fringe[n_tria:n_tria + n_quads, 0] = field_quad\n node_fringe[n_tria:n_tria + n_quads, 1] = field_quad\n node_fringe[n_tria:n_tria + n_quads, 2] = field_quad\n\n node_fringe[n_tria + n_quads:n_tria +\n 2 * n_quads, 0] = field_quad\n node_fringe[n_tria + n_quads:n_tria +\n 2 * n_quads, 1] = field_quad\n node_fringe[n_tria + n_quads:n_tria +\n 2 * n_quads, 2] = field_quad\n\n # flatty paddy\n node_fringe = node_fringe.flatten()\n else:\n # copy & paste ftw\n node_fringe = np.concatenate([\n field[tria_node_indexes].reshape((-1, 3)),\n field[quad_node_indexes_tria1].reshape((-1, 3)),\n field[quad_node_indexes_tria2].reshape((-1, 3))\n ])\n node_fringe = node_fringe.flatten()\n\n # element text\n node_txt = [str(entry) for entry in node_fringe.flatten()]\n else:\n node_fringe = np.zeros(len(nodes_xyz), dtype=np.float32)\n node_txt = [''] * len(nodes_xyz)\n\n # zip compression of data for HTML (reduces size)\n zdata = io.BytesIO()\n with ZipFile(zdata, 'w', compression=ZIP_DEFLATED) as zipFile:\n zipFile.writestr('/intensities', node_fringe.tostring())\n zipFile.writestr('/positions', nodes_xyz.tostring())\n zipFile.writestr('/text', json.dumps(node_txt))\n zdata = b64encode(zdata.getvalue()).decode('utf-8')\n\n # read html template\n _html_template = _read_file(os.path.join(\n os.path.dirname(__file__), 'resources', 'template.html'))\n\n # format html template file\n min_value = 0\n max_value = 0\n if fringe_limits:\n min_value = fringe_limits[0]\n max_value = fringe_limits[1]\n elif isinstance(field, np.ndarray):\n min_value = field.min()\n max_value = field.max()\n\n _html_div = _html_template.format(div_id=uuid.uuid4(),\n lowIntensity=min_value,\n highIntensity=max_value,\n zdata=zdata)\n\n # wrap it up with all needed js libraries\n _html_jszip_js = '<script type=\"text/javascript\">%s</script>' % _read_file(\n os.path.join(os.path.dirname(__file__), 'resources', 'jszip.min.js'))\n _html_three_js = '<script type=\"text/javascript\">%s</script>' % _read_file(\n os.path.join(os.path.dirname(__file__), 'resources', 'three.min.js'))\n _html_chroma_js = '<script type=\"text/javascript\">%s</script>' % _read_file(\n os.path.join(os.path.dirname(__file__), 'resources', 'chroma.min.js'))\n _html_jquery_js = '<script type=\"text/javascript\">%s</script>' % _read_file(\n os.path.join(os.path.dirname(__file__), 'resources', 'jquery.min.js'))\n\n return '''\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\" />\n {_jquery_js}\n {_jszip_js}\n {_three_js}\n {_chroma_js}\n </head>\n <body>\n {_html_div}\n </body>\n</html>'''.format(\n _html_div=_html_div,\n 
_jszip_js=_html_jszip_js,\n _three_js=_html_three_js,\n _chroma_js=_html_chroma_js,\n _jquery_js=_html_jquery_js)\n"
] | [
[
"numpy.logical_not",
"numpy.sum"
]
] |
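`plot_shell_mesh` above can only render triangles, so every quad is split into two trias before coordinates and fringe values are flattened for the HTML/WebGL template. A tiny sketch of that splitting step, using two hypothetical shell elements and the same `is_quad` test as the source (a tria repeats its last node index):

```python
import numpy as np

shell_node_indexes = np.array([
    [0, 1, 2, 2],   # tria: last index repeated, matching the is_quad test above
    [3, 4, 5, 6],   # quad
])

is_quad = shell_node_indexes[:, 2] != shell_node_indexes[:, 3]
trias = shell_node_indexes[~is_quad][:, :3]
quad_tria1 = shell_node_indexes[is_quad][:, :3]        # (n0, n1, n2)
quad_tria2 = shell_node_indexes[is_quad][:, [0, 2, 3]]  # (n0, n2, n3)

print(trias)        # [[0 1 2]]
print(quad_tria1)   # [[3 4 5]]
print(quad_tria2)   # [[3 5 6]]
```

The three triangle lists are then stacked, which is why nodes shared between elements are deliberately duplicated in `nodes_xyz`: each triangle needs its own per-corner fringe values.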
cgross95/ann-benchmarks | [
"5502f2930e1dd88c193178eb8a0a91f487e26e0c"
] | [
"ann_benchmarks/datasets.py"
] | [
"import h5py\nimport numpy\nimport os\nimport random\nimport csv\n\nfrom urllib.request import urlopen\nfrom urllib.request import urlretrieve\n\nfrom ann_benchmarks.distance import dataset_transform\n\n\ndef download(src, dst):\n if not os.path.exists(dst):\n # TODO: should be atomic\n print('downloading %s -> %s...' % (src, dst))\n urlretrieve(src, dst)\n\n\ndef get_dataset_fn(dataset):\n if not os.path.exists('data'):\n os.mkdir('data')\n return os.path.join('data', '%s.hdf5' % dataset)\n\n\ndef get_dataset(which):\n hdf5_fn = get_dataset_fn(which)\n try:\n url = 'http://ann-benchmarks.com/%s.hdf5' % which\n download(url, hdf5_fn)\n except:\n print(\"Cannot download %s\" % url)\n if which in DATASETS:\n print(\"Creating dataset locally\")\n DATASETS[which](hdf5_fn)\n hdf5_f = h5py.File(hdf5_fn, 'r')\n\n # here for backward compatibility, to ensure old datasets can still be used with newer versions\n # cast to integer because the json parser (later on) cannot interpret numpy integers\n dimension = int(hdf5_f.attrs['dimension']) if 'dimension' in hdf5_f.attrs else len(hdf5_f['train'][0])\n num_elements = int(hdf5_f.attrs['num_elements']) if 'num_elements' in hdf5_f.attrs else len(hdf5_f['train'])\n\n return hdf5_f, dimension, num_elements\n\n\n# Everything below this line is related to creating datasets\n# You probably never need to do this at home,\n# just rely on the prepared datasets at http://ann-benchmarks.com\n\ndef write_dynamic_output(train, fn, distance, point_type='float',\n radius=0.1, step=50):\n # step: distance between each nearest neighbor calculation\n # radius: what percentage of training data to use as k\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n n = 0\n f = h5py.File(fn, 'w')\n f.attrs['type'] = 'dense'\n f.attrs['distance'] = distance\n f.attrs['dimension'] = len(train[0])\n f.attrs['num_elements'] = len(train)\n f.attrs['point_type'] = point_type\n f.attrs['radius'] = radius\n f.attrs['step'] = step\n print('train size: %9d * %4d' % train.shape)\n print('a nearest neighbor will be calculated every %d points' % step)\n print('%f%% of the points seen at each step will be used as the number of neighbors'\n % (100 * radius))\n f.create_dataset('train', (len(train), len(\n train[0])), dtype=train.dtype)[:] = train\n num_test = (len(train) // step) - 1\n max_count = int(num_test * step * radius)\n neighbors = f.create_dataset('neighbors', (num_test, max_count), dtype='i')\n distances = f.create_dataset('distances', (num_test, max_count), dtype='f')\n bf = BruteForceBLAS(distance, precision=train.dtype)\n\n for idx, i in enumerate(range(step, len(train), step)):\n bf.fit(train[:i])\n x = train[i]\n count = int(radius * i)\n if i % 1000 == 0:\n print('%d/%d...' 
% (i, len(train)))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[idx] = [j for j, _ in res] + [-1] * (max_count - len(res))\n distances[idx] = [d for _, d in res]\\\n + [float('inf')] * (max_count - len(res))\n f.close()\n\n\ndef write_output(train, test, fn, distance, point_type='float', count=100):\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n n = 0\n f = h5py.File(fn, 'w')\n f.attrs['type'] = 'dense'\n f.attrs['distance'] = distance\n f.attrs['dimension'] = len(train[0])\n f.attrs['point_type'] = point_type\n print('train size: %9d * %4d' % train.shape)\n print('test size: %9d * %4d' % test.shape)\n f.create_dataset('train', (len(train), len(\n train[0])), dtype=train.dtype)[:] = train\n f.create_dataset('test', (len(test), len(\n test[0])), dtype=test.dtype)[:] = test\n neighbors = f.create_dataset('neighbors', (len(test), count), dtype='i')\n distances = f.create_dataset('distances', (len(test), count), dtype='f')\n bf = BruteForceBLAS(distance, precision=train.dtype)\n\n bf.fit(train)\n for i, x in enumerate(test):\n if i % 1000 == 0:\n print('%d/%d...' % (i, len(test)))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[i] = [j for j, _ in res]\n distances[i] = [d for _, d in res]\n f.close()\n\n\"\"\"\nparam: train and test are arrays of arrays of indices.\n\"\"\"\ndef write_sparse_output(train, test, fn, distance, dimension, count=100):\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n f = h5py.File(fn, 'w')\n f.attrs['type'] = 'sparse'\n f.attrs['distance'] = distance\n f.attrs['dimension'] = dimension\n f.attrs['point_type'] = 'bit'\n print('train size: %9d * %4d' % (train.shape[0], dimension))\n print('test size: %9d * %4d' % (test.shape[0], dimension))\n\n # We ensure the sets are sorted\n train = numpy.array(list(map(sorted, train)))\n test = numpy.array(list(map(sorted, test)))\n\n flat_train = numpy.hstack(train.flatten())\n flat_test = numpy.hstack(test.flatten())\n\n f.create_dataset('train', (len(flat_train),), dtype=flat_train.dtype)[:] = flat_train\n f.create_dataset('test', (len(flat_test),), dtype=flat_test.dtype)[:] = flat_test\n neighbors = f.create_dataset('neighbors', (len(test), count), dtype='i')\n distances = f.create_dataset('distances', (len(test), count), dtype='f')\n\n f.create_dataset('size_test', (len(test),), dtype='i')[:] = list(map(len, test))\n f.create_dataset('size_train', (len(train),), dtype='i')[:] = list(map(len, train))\n\n bf = BruteForceBLAS(distance, precision=train.dtype)\n bf.fit(train)\n for i, x in enumerate(test):\n if i % 1000 == 0:\n print('%d/%d...' 
% (i, len(test)))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[i] = [j for j, _ in res]\n distances[i] = [d for _, d in res]\n f.close()\n\ndef train_test_split(X, test_size=10000, dimension=None):\n import sklearn.model_selection\n if dimension == None:\n dimension = X.shape[1]\n print('Splitting %d*%d into train/test' % (X.shape[0], dimension))\n return sklearn.model_selection.train_test_split(\n X, test_size=test_size, random_state=1)\n\n\ndef glove(out_fn, d):\n import zipfile\n\n url = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'\n fn = os.path.join('data', 'glove.twitter.27B.zip')\n download(url, fn)\n with zipfile.ZipFile(fn) as z:\n print('preparing %s' % out_fn)\n z_fn = 'glove.twitter.27B.%dd.txt' % d\n X = []\n for line in z.open(z_fn):\n v = [float(x) for x in line.strip().split()[1:]]\n X.append(numpy.array(v))\n X_train, X_test = train_test_split(X)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef _load_texmex_vectors(f, n, k):\n import struct\n\n v = numpy.zeros((n, k))\n for i in range(n):\n f.read(4) # ignore vec length\n v[i] = struct.unpack('f' * k, f.read(k * 4))\n\n return v\n\n\ndef _get_irisa_matrix(t, fn):\n import struct\n m = t.getmember(fn)\n f = t.extractfile(m)\n k, = struct.unpack('i', f.read(4))\n n = m.size // (4 + 4 * k)\n f.seek(0)\n return _load_texmex_vectors(f, n, k)\n\n\ndef sift(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz'\n fn = os.path.join('data', 'sift.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'sift/sift_base.fvecs')\n test = _get_irisa_matrix(t, 'sift/sift_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef gist(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/gist.tar.gz'\n fn = os.path.join('data', 'gist.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'gist/gist_base.fvecs')\n test = _get_irisa_matrix(t, 'gist/gist_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef _load_mnist_vectors(fn):\n import gzip\n import struct\n\n print('parsing vectors in %s...' 
% fn)\n f = gzip.open(fn)\n type_code_info = {\n 0x08: (1, \"!B\"),\n 0x09: (1, \"!b\"),\n 0x0B: (2, \"!H\"),\n 0x0C: (4, \"!I\"),\n 0x0D: (4, \"!f\"),\n 0x0E: (8, \"!d\")\n }\n magic, type_code, dim_count = struct.unpack(\"!hBB\", f.read(4))\n assert magic == 0\n assert type_code in type_code_info\n\n dimensions = [struct.unpack(\"!I\", f.read(4))[0]\n for i in range(dim_count)]\n\n entry_count = dimensions[0]\n entry_size = numpy.product(dimensions[1:])\n\n b, format_string = type_code_info[type_code]\n vectors = []\n for i in range(entry_count):\n vectors.append([struct.unpack(format_string, f.read(b))[0]\n for j in range(entry_size)])\n return numpy.array(vectors)\n\n\ndef mnist(out_fn):\n download(\n 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'mnist-train.gz') # noqa\n download(\n 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'mnist-test.gz') # noqa\n train = _load_mnist_vectors('mnist-train.gz')\n test = _load_mnist_vectors('mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef fashion_mnist(out_fn):\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', # noqa\n 'fashion-mnist-train.gz')\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', # noqa\n 'fashion-mnist-test.gz')\n train = _load_mnist_vectors('fashion-mnist-train.gz')\n test = _load_mnist_vectors('fashion-mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n# Creates a 'deep image descriptor' dataset using the 'deep10M.fvecs' sample\n# from http://sites.skoltech.ru/compvision/noimi/. The download logic is adapted\n# from the script https://github.com/arbabenko/GNOIMI/blob/master/downloadDeep1B.py.\ndef deep_image(out_fn):\n yadisk_key = 'https://yadi.sk/d/11eDCm7Dsn9GA'\n response = urlopen('https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key=' \\\n + yadisk_key + '&path=/deep10M.fvecs')\n response_body = response.read().decode(\"utf-8\")\n\n dataset_url = response_body.split(',')[0][9:-1]\n filename = os.path.join('data', 'deep-image.fvecs')\n download(dataset_url, filename)\n\n # In the fvecs file format, each vector is stored by first writing its\n # length as an integer, then writing its components as floats.\n fv = numpy.fromfile(filename, dtype=numpy.float32)\n dim = fv.view(numpy.int32)[0]\n fv = fv.reshape(-1, dim + 1)[:, 1:]\n\n X_train, X_test = train_test_split(fv)\n write_output(X_train, X_test, out_fn, 'angular')\n\ndef transform_bag_of_words(filename, n_dimensions, out_fn):\n import gzip\n from scipy.sparse import lil_matrix\n from sklearn.feature_extraction.text import TfidfTransformer\n from sklearn import random_projection\n with gzip.open(filename, 'rb') as f:\n file_content = f.readlines()\n entries = int(file_content[0])\n words = int(file_content[1])\n file_content = file_content[3:] # strip first three entries\n print(\"building matrix...\")\n A = lil_matrix((entries, words))\n for e in file_content:\n doc, word, cnt = [int(v) for v in e.strip().split()]\n A[doc - 1, word - 1] = cnt\n print(\"normalizing matrix entries with tfidf...\")\n B = TfidfTransformer().fit_transform(A)\n print(\"reducing dimensionality...\")\n C = random_projection.GaussianRandomProjection(\n n_components=n_dimensions).fit_transform(B)\n X_train, X_test = train_test_split(C)\n write_output(numpy.array(X_train), numpy.array(\n X_test), out_fn, 'angular')\n\n\ndef nytimes(out_fn, n_dimensions):\n fn = 'nytimes_%s.txt.gz' % n_dimensions\n 
download('https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/docword.nytimes.txt.gz', fn) # noqa\n transform_bag_of_words(fn, n_dimensions, out_fn)\n\n\ndef random_float(out_fn, n_dims, n_samples, centers, distance):\n import sklearn.datasets\n\n X, _ = sklearn.datasets.make_blobs(\n n_samples=n_samples, n_features=n_dims,\n centers=centers, random_state=1)\n X_train, X_test = train_test_split(X, test_size=0.1)\n write_output(X_train, X_test, out_fn, distance)\n\n\ndef random_bitstring(out_fn, n_dims, n_samples, n_queries):\n import sklearn.datasets\n\n Y, _ = sklearn.datasets.make_blobs(\n n_samples=n_samples, n_features=n_dims,\n centers=n_queries, random_state=1)\n X = numpy.zeros((n_samples, n_dims), dtype=numpy.bool)\n for i, vec in enumerate(Y):\n X[i] = numpy.array([v > 0 for v in vec], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=n_queries)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\n\ndef word2bits(out_fn, path, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 'http://web.stanford.edu/~maxlam/word_vectors/compressed/%s/%s.tar.gz' % ( # noqa\n path, fn)\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n n_words, k = [int(z) for z in next(f).strip().split()]\n X = numpy.zeros((n_words, k), dtype=numpy.bool)\n for i in range(n_words):\n X[i] = numpy.array([float(z) > 0 for z in next(\n f).strip().split()[1:]], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\n\ndef sift_hamming(out_fn, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 'http://sss.projects.itu.dk/ann-benchmarks/datasets/%s.tar.gz' % fn\n download(url, local_fn)\n print('parsing vectors in %s...' % local_fn)\n with tarfile.open(local_fn, 'r:gz') as t:\n f = t.extractfile(fn)\n lines = f.readlines()\n X = numpy.zeros((len(lines), 256), dtype=numpy.bool)\n for i, line in enumerate(lines):\n X[i] = numpy.array(\n [int(x) > 0 for x in line.decode().strip()], dtype=numpy.bool)\n X_train, X_test = train_test_split(X, test_size=1000)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef kosarak(out_fn):\n import gzip\n local_fn = 'kosarak.dat.gz'\n # only consider sets with at least min_elements many elements\n min_elements = 20\n url = 'http://fimi.uantwerpen.be/data/%s' % local_fn\n download(url, local_fn)\n\n X = []\n dimension = 0\n with gzip.open('kosarak.dat.gz', 'r') as f:\n content = f.readlines()\n # preprocess data to find sets with more than 20 elements\n # keep track of used ids for reenumeration\n for line in content:\n if len(line.split()) >= min_elements:\n X.append(list(map(int, line.split())))\n dimension = max(dimension, max(X[-1]) + 1)\n\n X_train, X_test = train_test_split(numpy.array(X), test_size=500, dimension=dimension)\n write_sparse_output(X_train, X_test, out_fn, 'jaccard', dimension)\n\ndef random_jaccard(out_fn, n=10000, size=50, universe=80):\n random.seed(1)\n l = list(range(universe))\n X = []\n for i in range(n):\n X.append(random.sample(l, size))\n\n X_train, X_test = train_test_split(numpy.array(X), test_size=100, dimension=universe)\n write_sparse_output(X_train, X_test, out_fn, 'jaccard', universe)\n\n\n\ndef lastfm(out_fn, n_dimensions, test_size=50000):\n # This tests out ANN methods for retrieval on simple matrix factorization\n # based recommendation algorithms. 
The idea being that the query/test\n # vectors are user factors and the train set are item factors from\n # the matrix factorization model.\n\n # Since the predictor is a dot product, we transform the factors first\n # as described in this\n # paper: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf # noqa\n # This hopefully replicates the experiments done in this post:\n # http://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/ # noqa\n\n # The dataset is from \"Last.fm Dataset - 360K users\":\n # http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-360K.html # noqa\n\n # This requires the implicit package to generate the factors\n # (on my desktop/gpu this only takes 4-5 seconds to train - but\n # could take 1-2 minutes on a laptop)\n from implicit.datasets.lastfm import get_lastfm\n from implicit.approximate_als import augment_inner_product_matrix\n import implicit\n\n # train an als model on the lastfm data\n _, _, play_counts = get_lastfm()\n model = implicit.als.AlternatingLeastSquares(factors=n_dimensions)\n model.fit(implicit.nearest_neighbours.bm25_weight(\n play_counts, K1=100, B=0.8))\n\n # transform item factors so that each one has the same norm,\n # and transform the user factors such by appending a 0 column\n _, item_factors = augment_inner_product_matrix(model.item_factors)\n user_factors = numpy.append(model.user_factors,\n numpy.zeros((model.user_factors.shape[0], 1)),\n axis=1)\n\n # only query the first 50k users (speeds things up signficantly\n # without changing results)\n user_factors = user_factors[:test_size]\n\n # after that transformation a cosine lookup will return the same results\n # as the inner product on the untransformed data\n write_output(item_factors, user_factors, out_fn, 'angular')\n\n\ndef siemens_static(out_fn, dataset, step=50):\n # dataset is a string in ['SHERPA', 'SHERPA_100000', 'OLHC', 'AS']\n # step is distance between query points in dataset\n if dataset not in ['SHERPA', 'SHERPA_100000', 'OLHC', 'AS']:\n dataset = 'OLHC' # default\n\n if not os.path.exists(out_fn):\n X_test = []\n X_train = []\n csv_fn = os.path.join('data', '%s.csv' % dataset)\n with open(csv_fn, newline='') as csv_f:\n reader = csv.reader(csv_f)\n reader.__next__() # Burn header row\n for i, row in enumerate(reader):\n if i > 0 and i % step == 0:\n # Assume first two columns are not features\n X_test.append(list(map(float, row[2:])))\n else:\n # Assume first two columns are not features\n X_train.append(list(map(float, row[2:])))\n write_output(numpy.array(X_train), numpy.array(X_test),\n out_fn, 'euclidean')\n\n\ndef siemens_dynamic(out_fn, dataset, radius=0.1, step=50):\n # dataset is a string in ['SHERPA', 'SHERPA_100000', 'OLHC', 'AS']\n # step is distance between query points in dataset\n if dataset not in ['SHERPA', 'SHERPA_100000', 'OLHC', 'AS']:\n dataset = 'OLHC' # default\n\n if not os.path.exists(out_fn):\n X = []\n csv_fn = os.path.join('data', '%s.csv' % dataset)\n with open(csv_fn, newline='') as csv_f:\n reader = csv.reader(csv_f)\n reader.__next__() # Burn header row\n max_points = -1\n for i, row in enumerate(reader):\n if i == max_points:\n break\n # Assume first two columns are not features\n X.append(list(map(float, row[2:])))\n write_dynamic_output(numpy.array(X, dtype=numpy.float32), out_fn, 'euclidean',\n radius=radius, step=step)\n\n\nDATASETS = {\n 'deep-image-96-angular': deep_image,\n 'fashion-mnist-784-euclidean': fashion_mnist,\n 'gist-960-euclidean': gist,\n 
'glove-25-angular': lambda out_fn: glove(out_fn, 25),\n 'glove-50-angular': lambda out_fn: glove(out_fn, 50),\n 'glove-100-angular': lambda out_fn: glove(out_fn, 100),\n 'glove-200-angular': lambda out_fn: glove(out_fn, 200),\n 'mnist-784-euclidean': mnist,\n 'random-xs-20-euclidean': lambda out_fn: random_float(out_fn, 20, 10000, 100,\n 'euclidean'),\n 'random-s-100-euclidean': lambda out_fn: random_float(out_fn, 100, 100000, 1000,\n 'euclidean'),\n 'random-xs-20-angular': lambda out_fn: random_float(out_fn, 20, 10000, 100,\n 'angular'),\n 'random-s-100-angular': lambda out_fn: random_float(out_fn, 100, 100000, 1000,\n 'angular'),\n 'random-xs-16-hamming': lambda out_fn: random_bitstring(out_fn, 16, 10000,\n 100),\n 'random-s-128-hamming': lambda out_fn: random_bitstring(out_fn, 128,\n 50000, 1000),\n 'random-l-256-hamming': lambda out_fn: random_bitstring(out_fn, 256,\n 100000, 1000),\n 'random-s-jaccard': lambda out_fn: random_jaccard(out_fn, n=10000,\n size=20, universe=40),\n 'random-l-jaccard': lambda out_fn: random_jaccard(out_fn, n=100000,\n size=70, universe=100),\n 'sift-128-euclidean': sift,\n 'nytimes-256-angular': lambda out_fn: nytimes(out_fn, 256),\n 'nytimes-16-angular': lambda out_fn: nytimes(out_fn, 16),\n 'word2bits-800-hamming': lambda out_fn: word2bits(\n out_fn, '400K',\n 'w2b_bitlevel1_size800_vocab400K'),\n 'lastfm-64-dot': lambda out_fn: lastfm(out_fn, 64),\n 'sift-256-hamming': lambda out_fn: sift_hamming(\n out_fn, 'sift.hamming.256'),\n 'kosarak-jaccard': lambda out_fn: kosarak(out_fn),\n 'siemens-sherpa-static': lambda out_fn: siemens_static(out_fn, 'SHERPA'),\n 'siemens-big-sherpa-static': lambda out_fn: siemens_static(\n out_fn, 'SHERPA_100000'),\n 'siemens-olhc-static': lambda out_fn: siemens_static(out_fn, 'OLHC'),\n 'siemens-as-static': lambda out_fn: siemens_static(out_fn, 'AS'),\n 'siemens-sherpa': lambda out_fn: siemens_dynamic(out_fn, 'SHERPA'),\n 'siemens-big-sherpa': lambda out_fn: siemens_dynamic(\n out_fn, 'SHERPA_100000'),\n 'siemens-olhc': lambda out_fn: siemens_dynamic(out_fn, 'OLHC'),\n 'siemens-as': lambda out_fn: siemens_dynamic(out_fn, 'AS')\n}\n"
] | [
[
"numpy.product",
"numpy.array",
"numpy.zeros",
"sklearn.random_projection.GaussianRandomProjection",
"numpy.fromfile",
"scipy.sparse.lil_matrix",
"sklearn.feature_extraction.text.TfidfTransformer"
]
] |
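`write_output` above stores each benchmark dataset as an HDF5 file holding `train`, `test`, `neighbors` and `distances` datasets plus a few attributes. The sketch below reproduces that layout on random data, using scikit-learn's brute-force `NearestNeighbors` in place of the project's internal `BruteForceBLAS` (an assumed substitution for portability, not the benchmark's own code path).

```python
import h5py
import numpy
from sklearn.neighbors import NearestNeighbors

count = 10
train = numpy.random.rand(1000, 20).astype(numpy.float32)
test = numpy.random.rand(50, 20).astype(numpy.float32)

# exact neighbors serve as ground truth for the approximate methods under test
nn = NearestNeighbors(n_neighbors=count, algorithm="brute", metric="euclidean")
nn.fit(train)
dist, idx = nn.kneighbors(test)

with h5py.File("random-xs-20-euclidean.hdf5", "w") as f:
    f.attrs["type"] = "dense"
    f.attrs["distance"] = "euclidean"
    f.attrs["dimension"] = train.shape[1]
    f.attrs["point_type"] = "float"
    f.create_dataset("train", data=train)
    f.create_dataset("test", data=test)
    f.create_dataset("neighbors", data=idx.astype("i"))
    f.create_dataset("distances", data=dist.astype("f"))
```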
gehilley/NondimensionalWeathering | [
"8b59a00d59c026e1b3a3b8f7c3c6fe51a272c21a"
] | [
"weathering_model/utils.py"
] | [
"def pack_values(values, packing_geometry=None):\n \"\"\"\n pack_values: packs and unpacks values into vectors suitable for ODE integrators:\n\n Parameters:\n -----------\n values : ndarray of values to unpack / pack. Array is n x m if values are to be packed.\n packing_geometry : A tuple of the output size of the packed values. If packing_geometry is None, values will be\n packed into an ((n*m) x 1) vector. Values will be repacked row-wise (Fortran order).\n \"\"\"\n\n import numpy as np\n packing_geometry = (np.prod(values.shape),) if packing_geometry is None else packing_geometry\n assert(np.prod(values.shape) == np.prod(np.array(packing_geometry)))\n return np.reshape(values, packing_geometry, order='F')\n\ndef plot_models(filename, out_prefix, save_plots = False, plot_symbols = None, plot_colors = None, t_indexes = None):\n\n import pickle as p\n import matplotlib.pylab as plt\n import numpy as np\n\n (x, X, Y, L_star, Y0_star, v_star, nx, t_star, dx_star) = p.load(open(filename, 'rb'))\n\n plot_symbols = ['-' for i in range(len(t_star))] if plot_symbols is None else plot_symbols\n plot_indexes = t_indexes if t_indexes is not None else range(len(t_star))\n plot_colors = plot_colors if plot_colors is not None else ['r', 'b']\n\n plt.figure()\n plt.title(out_prefix)\n\n for (i, plot_symbol) in zip(plot_indexes, plot_symbols):\n plt.plot(x, X[i, :], plot_symbol, color=plot_colors[0], linewidth=1.0)\n plt.plot(x, Y[i, :], plot_symbol, color=plot_colors[1], linewidth=1.0)\n\n ax = plt.gca()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n plt.xlabel('$x^{*}$')\n plt.ylabel('$X^{*}$, $Y^{*}$')\n plt.axis([0, np.max(x), 0, 1.1])\n\n\n\n if save_plots:\n plt.savefig(out_prefix+'_chem.eps')\n else:\n plt.show()\n\ndef plot_cracking_models(filename, out_prefix, save_plots = False, plot_symbols = None, plot_colors = None, t_indexes = None, upper_L_crack = 7.4E-6, lower_L_crack = 1.5E-8):\n import pickle as p\n import matplotlib.pylab as plt\n import numpy as np\n\n import warnings\n\n warnings.filterwarnings(\"ignore\")\n\n (x, X, Y, L_star, Y0_star, v_star, nx, t_star, dx_star) = p.load(open(filename, 'rb'))\n\n plot_symbols = ['-' for i in range(len(t_star))] if plot_symbols is None else plot_symbols\n plot_indexes = t_indexes if t_indexes is not None else range(len(t_star))\n\n plt.figure()\n plt.title(out_prefix)\n\n for (i, plot_symbol) in zip(plot_indexes, plot_symbols):\n L_crack = np.power(Y0_star,2) / np.power(1-X[i,:],2)\n plt.semilogy(x, L_crack, 'k' + plot_symbol, linewidth=1.0)\n plt.semilogy([0, max(x)], [upper_L_crack, upper_L_crack], 'k--', linewidth=1.0)\n plt.semilogy([0, max(x)], [lower_L_crack, lower_L_crack], 'k:', linewidth=1.0)\n\n ax = plt.gca()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n plt.xlabel('$x^{*}$')\n plt.ylabel('$L^{*}$')\n plt.axis([0, np.max(x), 1E-8, 1E-4])\n\n if save_plots:\n plt.savefig(out_prefix + '_crack.eps')\n else:\n plt.show()\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.reshape",
"matplotlib.pylab.savefig",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.semilogy",
"matplotlib.pylab.show",
"matplotlib.pylab.figure",
"matplotlib.pylab.xlabel",
"numpy.prod",
"numpy.power",
"matplotlib.pylab.title",
"matplotlib.pylab.gca",
"matplotlib.pylab.plot"
]
] |
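`pack_values` in `weathering_model/utils.py` flattens an (n, m) state array column-wise (Fortran order) so it can be handed to an ODE integrator, and restores it with the same call by passing a target shape. A quick round-trip check reusing the function from the row above on arbitrary sample values:

```python
import numpy as np


def pack_values(values, packing_geometry=None):
    # same behavior as the source: flatten to a 1-D vector by default,
    # otherwise reshape to the requested geometry, always in Fortran order
    packing_geometry = (np.prod(values.shape),) if packing_geometry is None else packing_geometry
    assert np.prod(values.shape) == np.prod(np.array(packing_geometry))
    return np.reshape(values, packing_geometry, order='F')


grid = np.arange(6).reshape(2, 3)        # e.g. two state variables on 3 nodes
packed = pack_values(grid)               # shape (6,), column-major order
restored = pack_values(packed, (2, 3))   # back to (2, 3)

assert np.array_equal(grid, restored)
print(packed)  # [0 3 1 4 2 5]
```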
agarwal29796/napari | [
"aa815ea0430232cb91f7d8111da68c5a880841d3"
] | [
"napari/layers/image/image.py"
] | [
"\"\"\"Image class.\n\"\"\"\nfrom __future__ import annotations\n\nimport types\nimport warnings\nfrom typing import TYPE_CHECKING, Union\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom ...utils import config\nfrom ...utils.colormaps import AVAILABLE_COLORMAPS\nfrom ...utils.events import Event\nfrom ...utils.translations import trans\nfrom ..base import Layer\nfrom ..intensity_mixin import IntensityVisualizationMixin\nfrom ..utils.layer_utils import calc_data_range\nfrom ..utils.plane_manager import PlaneManager\nfrom ._image_constants import Interpolation, Interpolation3D, Rendering\nfrom ._image_slice import ImageSlice\nfrom ._image_slice_data import ImageSliceData\nfrom ._image_utils import guess_multiscale, guess_rgb\n\nif TYPE_CHECKING:\n from ...components.experimental.chunk import ChunkRequest\n\n\n# It is important to contain at least one abstractmethod to properly exclude this class\n# in creating NAMES set inside of napari.layers.__init__\n# Mixin must come before Layer\nclass _ImageBase(IntensityVisualizationMixin, Layer):\n \"\"\"Image layer.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N >= 2 dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n a multiscale image. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n rgb : bool\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n colormap : str, napari.utils.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n rotate : float, 3-tuple of float, or n-D array.\n If a float convert into a 2D rotation matrix using that value as an\n angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,\n pitch, roll convention. Otherwise assume an nD rotation. Angles are\n assumed to be in degrees. 
They can be converted from radians with\n np.degrees if needed.\n shear : 1-D array or n-D array\n Either a vector of upper triangular values, or an nD shear matrix with\n ones along the main diagonal.\n affine : n-D array or napari.utils.transforms.Affine\n (N+1, N+1) affine transformation matrix in homogeneous coordinates.\n The first (N, N) entries correspond to a linear transform and\n the final column is a length N translation vector and a 1 or a napari\n AffineTransform object. If provided then translate, scale, rotate, and\n shear values are ignored.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n multiscale : bool\n Whether the data is a multiscale image or not. Multiscale data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be multiscale. The first image in the list\n should be the largest. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n plane : dict or PlaneManager\n Properties defining plane rendering in 3D. Properties are defined in\n data coordinates. Valid dictionary keys are\n {'position', 'normal_vector', 'thickness', and 'enabled'}.\n\n Attributes\n ----------\n data : array or list of array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a list\n and arrays are decreasing in shape then the data is treated as a\n multiscale image. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n metadata : dict\n Image metadata.\n rgb : bool\n Whether the image is rgb RGB or RGBA if rgb. If not\n specified by user and the last dimension of the data has length 3 or 4\n it will be set as `True`. If `False` the image is interpreted as a\n luminance image.\n multiscale : bool\n Whether the data is a multiscale image or not. Multiscale data is\n represented by a list of array like image data. The first image in the\n list should be the largest. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n colormap : 2-tuple of str, napari.utils.Colormap\n The first is the name of the current colormap, and the second value is\n the colormap. Colormaps are used for luminance images, if the image is\n rgb the colormap is ignored.\n colormaps : tuple of str\n Names of the available colormaps.\n contrast_limits : list (2,) of float\n Color limits to be used for determining the colormap bounds for\n luminance images. If the image is rgb the contrast_limits is ignored.\n contrast_limits_range : list (2,) of float\n Range for the color limits for luminance images. If the image is\n rgb the contrast_limits_range is ignored.\n gamma : float\n Gamma correction for determining colormap linearity.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. 
Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n plane : PlaneManager\n Properties defining plane rendering in 3D.\n\n Notes\n -----\n _data_view : array (N, M), (N, M, 3), or (N, M, 4)\n Image data for the currently viewed slice. Must be 2D image data, but\n can be multidimensional for RGB or RGBA images if multidimensional is\n `True`.\n _colorbar : array\n Colorbar for current colormap.\n \"\"\"\n\n _colormaps = AVAILABLE_COLORMAPS\n\n def __init__(\n self,\n data,\n *,\n rgb=None,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n plane=PlaneManager(),\n iso_threshold=0.5,\n attenuation=0.05,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n rotate=None,\n shear=None,\n affine=None,\n opacity=1,\n blending='translucent',\n visible=True,\n multiscale=None,\n ):\n if isinstance(data, types.GeneratorType):\n data = list(data)\n\n if getattr(data, 'ndim', 2) < 2:\n raise ValueError(\n trans._('Image data must have at least 2 dimensions.')\n )\n\n # Determine if data is a multiscale\n if multiscale is None:\n multiscale, data = guess_multiscale(data)\n\n # Determine initial shape\n if multiscale:\n init_shape = data[0].shape\n else:\n init_shape = data.shape\n\n # Determine if rgb\n if rgb is None:\n rgb = guess_rgb(init_shape)\n\n # Determine dimensionality of the data\n if rgb:\n ndim = len(init_shape) - 1\n else:\n ndim = len(init_shape)\n\n super().__init__(\n data,\n ndim,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n rotate=rotate,\n shear=shear,\n affine=affine,\n opacity=opacity,\n blending=blending,\n visible=visible,\n multiscale=multiscale,\n )\n\n self.events.add(\n interpolation=Event,\n rendering=Event,\n iso_threshold=Event,\n attenuation=Event,\n )\n\n # Set data\n self.rgb = rgb\n self._data = data\n if self.multiscale:\n self._data_level = len(self.data) - 1\n # Determine which level of the multiscale to use for the thumbnail.\n # Pick the smallest level with at least one axis >= 64. 
This is\n # done to prevent the thumbnail from being from one of the very\n # low resolution layers and therefore being very blurred.\n big_enough_levels = [\n np.any(np.greater_equal(p.shape, 64)) for p in data\n ]\n if np.any(big_enough_levels):\n self._thumbnail_level = np.where(big_enough_levels)[0][-1]\n else:\n self._thumbnail_level = 0\n else:\n self._data_level = 0\n self._thumbnail_level = 0\n displayed_axes = self._displayed_axes\n self.corner_pixels[1][displayed_axes] = self.level_shapes[\n self._data_level\n ][displayed_axes]\n\n self._new_empty_slice()\n\n # Set contrast limits, colormaps and plane parameters\n self._gamma = gamma\n self._iso_threshold = iso_threshold\n self._attenuation = attenuation\n self._experimental_slicing_plane = PlaneManager()\n if contrast_limits is None:\n self.contrast_limits_range = self._calc_data_range()\n else:\n self.contrast_limits_range = contrast_limits\n self._contrast_limits = tuple(self.contrast_limits_range)\n self.colormap = colormap\n self.contrast_limits = self._contrast_limits\n self._interpolation = {\n 2: Interpolation.NEAREST,\n 3: (\n Interpolation3D.NEAREST\n if self.__class__.__name__ == 'Labels'\n else Interpolation3D.LINEAR\n ),\n }\n self.interpolation = interpolation\n self.rendering = rendering\n if plane is not None:\n self.experimental_slicing_plane.update(plane)\n\n # Trigger generation of view slice and thumbnail\n self._update_dims()\n\n def _new_empty_slice(self):\n \"\"\"Initialize the current slice to an empty image.\"\"\"\n self._slice = ImageSlice(\n self._get_empty_image(), self._raw_to_displayed, self.rgb\n )\n self._empty = True\n\n def _get_empty_image(self):\n \"\"\"Get empty image to use as the default before data is loaded.\"\"\"\n if self.rgb:\n return np.zeros((1,) * self._ndisplay + (3,))\n else:\n return np.zeros((1,) * self._ndisplay)\n\n def _get_order(self):\n \"\"\"Return the order of the displayed dimensions.\"\"\"\n if self.rgb:\n # if rgb need to keep the final axis fixed during the\n # transpose. The index of the final axis depends on how many\n # axes are displayed.\n return self._dims_displayed_order + (\n max(self._dims_displayed_order) + 1,\n )\n else:\n return self._dims_displayed_order\n\n @property\n def _data_view(self):\n \"\"\"Viewable image for the current slice. (compatibility)\"\"\"\n return self._slice.image.view\n\n @property\n def _data_raw(self):\n \"\"\"Raw image for the current slice. 
(compatibility)\"\"\"\n return self._slice.image.raw\n\n def _calc_data_range(self, mode='data'):\n if mode == 'data':\n input_data = self.data[-1] if self.multiscale else self.data\n elif mode == 'slice':\n data = self._slice.image.view # ugh\n input_data = data[-1] if self.multiscale else data\n else:\n raise ValueError(\n f\"mode must be either 'data' or 'slice', got {mode!r}\"\n )\n return calc_data_range(input_data, rgb=self.rgb)\n\n @property\n def dtype(self):\n return self.data[0].dtype if self.multiscale else self.data.dtype\n\n @property\n def data(self):\n \"\"\"array: Image data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n self._data = data\n self._update_dims()\n self.events.data(value=self.data)\n if self._keep_autoscale:\n self.reset_contrast_limits()\n self._set_editable()\n\n def _get_ndim(self):\n \"\"\"Determine number of dimensions of the layer.\"\"\"\n return len(self.level_shapes[0])\n\n @property\n def _extent_data(self) -> np.ndarray:\n \"\"\"Extent of layer in data coordinates.\n\n Returns\n -------\n extent_data : array, shape (2, D)\n \"\"\"\n shape = np.subtract(self.level_shapes[0], 1)\n return np.vstack([np.zeros(len(shape)), shape])\n\n @property\n def data_level(self):\n \"\"\"int: Current level of multiscale, or 0 if image.\"\"\"\n return self._data_level\n\n @data_level.setter\n def data_level(self, level):\n if self._data_level == level:\n return\n self._data_level = level\n self.refresh()\n\n @property\n def level_shapes(self):\n \"\"\"array: Shapes of each level of the multiscale or just of image.\"\"\"\n if self.multiscale:\n if self.rgb:\n shapes = [im.shape[:-1] for im in self.data]\n else:\n shapes = [im.shape for im in self.data]\n else:\n if self.rgb:\n shapes = [self.data.shape[:-1]]\n else:\n shapes = [self.data.shape]\n return np.array(shapes)\n\n @property\n def downsample_factors(self):\n \"\"\"list: Downsample factors for each level of the multiscale.\"\"\"\n return np.divide(self.level_shapes[0], self.level_shapes)\n\n @property\n def iso_threshold(self):\n \"\"\"float: threshold for isosurface.\"\"\"\n return self._iso_threshold\n\n @iso_threshold.setter\n def iso_threshold(self, value):\n self._iso_threshold = value\n self._update_thumbnail()\n self.events.iso_threshold()\n\n @property\n def attenuation(self):\n \"\"\"float: attenuation rate for attenuated_mip rendering.\"\"\"\n return self._attenuation\n\n @attenuation.setter\n def attenuation(self, value):\n self._attenuation = value\n self._update_thumbnail()\n self.events.attenuation()\n\n @property\n def interpolation(self):\n \"\"\"Return current interpolation mode.\n\n Selects a preset interpolation mode in vispy that determines how volume\n is displayed. 
Makes use of the two Texture2D interpolation methods and\n the available interpolation methods defined in\n vispy/gloo/glsl/misc/spatial_filters.frag\n\n Options include:\n 'bessel', 'bicubic', 'bilinear', 'blackman', 'catrom', 'gaussian',\n 'hamming', 'hanning', 'hermite', 'kaiser', 'lanczos', 'mitchell',\n 'nearest', 'spline16', 'spline36'\n\n Returns\n -------\n str\n The current interpolation mode\n \"\"\"\n return str(self._interpolation[self._ndisplay])\n\n @interpolation.setter\n def interpolation(self, interpolation):\n \"\"\"Set current interpolation mode.\"\"\"\n if self._ndisplay == 3:\n self._interpolation[self._ndisplay] = Interpolation3D(\n interpolation\n )\n else:\n self._interpolation[self._ndisplay] = Interpolation(interpolation)\n self.events.interpolation(value=self._interpolation[self._ndisplay])\n\n @property\n def rendering(self):\n \"\"\"Return current rendering mode.\n\n Selects a preset rendering mode in vispy that determines how\n volume is displayed. Options include:\n\n * ``translucent``: voxel colors are blended along the view ray until\n the result is opaque.\n * ``mip``: maximum intensity projection. Cast a ray and display the\n maximum value that was encountered.\n * ``additive``: voxel colors are added along the view ray until the\n result is saturated.\n * ``iso``: isosurface. Cast a ray until a certain threshold is\n encountered. At that location, lighning calculations are performed to\n give the visual appearance of a surface.\n * ``attenuated_mip``: attenuated maximum intensity projection. Cast a\n ray and attenuate values based on integral of encountered values,\n display the maximum value that was encountered after attenuation.\n This will make nearer objects appear more prominent.\n\n Returns\n -------\n str\n The current rendering mode\n \"\"\"\n return str(self._rendering)\n\n @rendering.setter\n def rendering(self, rendering):\n \"\"\"Set current rendering mode.\"\"\"\n self._rendering = Rendering(rendering)\n self.events.rendering()\n\n @property\n def experimental_slicing_plane(self):\n return self._experimental_slicing_plane\n\n @experimental_slicing_plane.setter\n def plane(self, value: Union[dict, PlaneManager]):\n self._experimental_slicing_plane.update(value)\n\n @property\n def loaded(self):\n \"\"\"Has the data for this layer been loaded yet.\n\n With asynchronous loading the layer might exist but its data\n for the current slice has not been loaded.\n \"\"\"\n return self._slice.loaded\n\n def _raw_to_displayed(self, raw):\n \"\"\"Determine displayed image from raw image.\n\n For normal image layers, just return the actual image.\n\n Parameters\n ----------\n raw : array\n Raw array.\n\n Returns\n -------\n image : array\n Displayed array.\n \"\"\"\n image = raw\n return image\n\n def _set_view_slice(self):\n \"\"\"Set the view given the indices to slice with.\"\"\"\n self._new_empty_slice()\n not_disp = self._dims_not_displayed\n\n # Check if requested slice outside of data range\n indices = np.array(self._slice_indices)\n extent = self._extent_data\n if np.any(\n np.less(\n [indices[ax] for ax in not_disp],\n [extent[0, ax] for ax in not_disp],\n )\n ) or np.any(\n np.greater(\n [indices[ax] for ax in not_disp],\n [extent[1, ax] for ax in not_disp],\n )\n ):\n return\n self._empty = False\n\n if self.multiscale:\n if self._ndisplay == 3:\n # If 3d redering just show lowest level of multiscale\n warnings.warn(\n trans._(\n 'Multiscale rendering is only supported in 2D. 
In 3D, only the lowest resolution scale is displayed',\n deferred=True,\n ),\n category=UserWarning,\n )\n self.data_level = len(self.data) - 1\n\n # Slice currently viewed level\n level = self.data_level\n indices = np.array(self._slice_indices)\n downsampled_indices = (\n indices[not_disp] / self.downsample_factors[level, not_disp]\n )\n downsampled_indices = np.round(\n downsampled_indices.astype(float)\n ).astype(int)\n downsampled_indices = np.clip(\n downsampled_indices, 0, self.level_shapes[level, not_disp] - 1\n )\n indices[not_disp] = downsampled_indices\n\n scale = np.ones(self.ndim)\n for d in self._dims_displayed:\n scale[d] = self.downsample_factors[self.data_level][d]\n self._transforms['tile2data'].scale = scale\n\n if self._ndisplay == 2:\n for d in self._displayed_axes:\n indices[d] = slice(\n self.corner_pixels[0, d],\n self.corner_pixels[1, d] + 1,\n 1,\n )\n self._transforms['tile2data'].translate = (\n self.corner_pixels[0] * self._transforms['tile2data'].scale\n )\n image = self.data[level][tuple(indices)]\n image_indices = indices\n\n # Slice thumbnail\n indices = np.array(self._slice_indices)\n downsampled_indices = (\n indices[not_disp]\n / self.downsample_factors[self._thumbnail_level, not_disp]\n )\n downsampled_indices = np.round(\n downsampled_indices.astype(float)\n ).astype(int)\n downsampled_indices = np.clip(\n downsampled_indices,\n 0,\n self.level_shapes[self._thumbnail_level, not_disp] - 1,\n )\n indices[not_disp] = downsampled_indices\n\n thumbnail_source = self.data[self._thumbnail_level][tuple(indices)]\n else:\n self._transforms['tile2data'].scale = np.ones(self.ndim)\n image_indices = self._slice_indices\n image = self.data[image_indices]\n\n # For single-scale we don't request a separate thumbnail_source\n # from the ChunkLoader because in ImageSlice.chunk_loaded we\n # call request.thumbnail_source() and it knows to just use the\n # image itself is there is no explicit thumbnail_source.\n thumbnail_source = None\n\n # Load our images, might be sync or async.\n data = self._SliceDataClass(\n self, image_indices, image, thumbnail_source\n )\n self._load_slice(data)\n if self._keep_autoscale:\n self.reset_contrast_limits()\n\n @property\n def _SliceDataClass(self):\n # Use special ChunkedSlideData for async.\n if config.async_loading:\n from .experimental._chunked_slice_data import ChunkedSliceData\n\n return ChunkedSliceData\n return ImageSliceData\n\n def _load_slice(self, data: ImageSliceData):\n \"\"\"Load the image and maybe thumbnail source.\n\n Parameters\n ----------\n data : Slice\n \"\"\"\n if self._slice.load(data):\n # The load was synchronous.\n self._on_data_loaded(data, sync=True)\n else:\n # The load will be asynchronous. 
Signal that our self.loaded\n # property is now false, since the load is in progress.\n self.events.loaded()\n\n def _on_data_loaded(self, data: ImageSliceData, sync: bool) -> None:\n \"\"\"The given data a was loaded, use it now.\n\n This routine is called synchronously from _load_async() above, or\n it is called asynchronously sometime later when the ChunkLoader\n finishes loading the data in a worker thread or process.\n\n Parameters\n ----------\n data : ChunkRequest\n The request that was satisfied/loaded.\n sync : bool\n If True the chunk was loaded synchronously.\n \"\"\"\n # Transpose after the load.\n data.transpose(self._get_order())\n\n # Pass the loaded data to the slice.\n if not self._slice.on_loaded(data):\n # Slice rejected it, was it for the wrong indices?\n return\n\n # Notify the world.\n if self.multiscale:\n self.events.scale()\n self.events.translate()\n\n # Announcing we are in the loaded state will make our node visible\n # if it was invisible during the load.\n self.events.loaded()\n\n if not sync:\n # TODO_ASYNC: Avoid calling self.refresh(), because it would\n # call our _set_view_slice(). Do we need a \"refresh without\n # set_view_slice()\" method that we can call?\n\n self.events.set_data(value=self._slice) # update vispy\n self._update_thumbnail()\n\n def _update_thumbnail(self):\n \"\"\"Update thumbnail with current image data and colormap.\"\"\"\n if not self.loaded:\n # ASYNC_TODO: Do not compute the thumbnail until we are loaded.\n # Is there a nicer way to prevent this from getting called?\n return\n\n image = self._slice.thumbnail.view\n\n if self._ndisplay == 3 and self.ndim > 2:\n image = np.max(image, axis=0)\n\n # float16 not supported by ndi.zoom\n dtype = np.dtype(image.dtype)\n if dtype in [np.dtype(np.float16)]:\n image = image.astype(np.float32)\n\n raw_zoom_factor = np.divide(\n self._thumbnail_shape[:2], image.shape[:2]\n ).min()\n new_shape = np.clip(\n raw_zoom_factor * np.array(image.shape[:2]),\n 1, # smallest side should be 1 pixel wide\n self._thumbnail_shape[:2],\n )\n zoom_factor = tuple(new_shape / image.shape[:2])\n if self.rgb:\n # warning filter can be removed with scipy 1.4\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n downsampled = ndi.zoom(\n image, zoom_factor + (1,), prefilter=False, order=0\n )\n if image.shape[2] == 4: # image is RGBA\n colormapped = np.copy(downsampled)\n colormapped[..., 3] = downsampled[..., 3] * self.opacity\n if downsampled.dtype == np.uint8:\n colormapped = colormapped.astype(np.uint8)\n else: # image is RGB\n if downsampled.dtype == np.uint8:\n alpha = np.full(\n downsampled.shape[:2] + (1,),\n int(255 * self.opacity),\n dtype=np.uint8,\n )\n else:\n alpha = np.full(downsampled.shape[:2] + (1,), self.opacity)\n colormapped = np.concatenate([downsampled, alpha], axis=2)\n else:\n # warning filter can be removed with scipy 1.4\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n downsampled = ndi.zoom(\n image, zoom_factor, prefilter=False, order=0\n )\n low, high = self.contrast_limits\n downsampled = np.clip(downsampled, low, high)\n color_range = high - low\n if color_range != 0:\n downsampled = (downsampled - low) / color_range\n downsampled = downsampled ** self.gamma\n color_array = self.colormap.map(downsampled.ravel())\n colormapped = color_array.reshape(downsampled.shape + (4,))\n colormapped[..., 3] *= self.opacity\n self.thumbnail = colormapped\n\n def _get_value(self, position):\n \"\"\"Value of the data at a position in data coordinates.\n\n 
Parameters\n ----------\n position : tuple\n Position in data coordinates.\n\n Returns\n -------\n value : tuple\n Value of the data.\n \"\"\"\n if self.multiscale:\n # for multiscale data map the coordinate from the data back to\n # the tile\n coord = self._transforms['tile2data'].inverse(position)\n else:\n coord = position\n\n coord = np.round(coord).astype(int)\n\n raw = self._slice.image.raw\n if self.rgb:\n shape = raw.shape[:-1]\n else:\n shape = raw.shape\n\n if all(0 <= c < s for c, s in zip(coord[self._dims_displayed], shape)):\n value = raw[tuple(coord[self._dims_displayed])]\n else:\n value = None\n\n if self.multiscale:\n value = (self.data_level, value)\n\n return value\n\n # For async we add an on_chunk_loaded() method.\n if config.async_loading:\n\n def on_chunk_loaded(self, request: ChunkRequest) -> None:\n \"\"\"An asynchronous ChunkRequest was loaded.\n\n Parameters\n ----------\n request : ChunkRequest\n This request was loaded.\n \"\"\"\n # Convert the ChunkRequest to SliceData and use it.\n data = self._SliceDataClass.from_request(self, request)\n self._on_data_loaded(data, sync=False)\n\n\nclass Image(_ImageBase):\n def _get_state(self):\n \"\"\"Get dictionary of layer state.\n\n Returns\n -------\n state : dict\n Dictionary of layer state.\n \"\"\"\n state = self._get_base_state()\n state.update(\n {\n 'rgb': self.rgb,\n 'multiscale': self.multiscale,\n 'colormap': self.colormap.name,\n 'contrast_limits': self.contrast_limits,\n 'interpolation': self.interpolation,\n 'rendering': self.rendering,\n 'plane': self.experimental_slicing_plane.dict(),\n 'iso_threshold': self.iso_threshold,\n 'attenuation': self.attenuation,\n 'gamma': self.gamma,\n 'data': self.data,\n }\n )\n return state\n\n\nif config.async_octree:\n from ..image.experimental.octree_image import _OctreeImageBase\n\n class Image(Image, _OctreeImageBase):\n pass\n"
] | [
[
"numpy.divide",
"numpy.max",
"numpy.array",
"numpy.less",
"numpy.concatenate",
"numpy.greater",
"numpy.zeros",
"numpy.full",
"numpy.round",
"numpy.copy",
"numpy.ones",
"numpy.greater_equal",
"numpy.any",
"numpy.where",
"numpy.subtract",
"numpy.clip",
"scipy.ndimage.zoom",
"numpy.dtype"
]
] |
arturtoshev/ncsnv2 | [
"5e0159360e5893a0cac83defc2a83fa0ff633137"
] | [
"models/__init__.py"
] | [
"import torch\nimport numpy as np\n\nfrom sgllmc import anneal_Levy_Langevin_dynamics as anneal_Langevin_dynamics\nfrom sgllmc import anneal_Levy_Langevin_dynamics_inpainting as anneal_Langevin_dynamics_inpainting\nfrom sgllmc import anneal_Levy_Langevin_dynamics_interpolation as anneal_Langevin_dynamics_interpolation\n\ndef get_sigmas(config):\n if config.model.sigma_dist == 'geometric':\n sigmas = torch.tensor(\n np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),\n config.model.num_classes))).float().to(config.device)\n elif config.model.sigma_dist == 'uniform':\n sigmas = torch.tensor(\n np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)\n ).float().to(config.device)\n\n else:\n raise NotImplementedError('sigma distribution not supported')\n\n return sigmas\n\n#\n# @torch.no_grad()\n# def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,\n# final_only=False, verbose=False, denoise=True):\n# images = []\n#\n# with torch.no_grad():\n# for c, sigma in enumerate(sigmas):\n# labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n# labels = labels.long()\n# step_size = step_lr * (sigma / sigmas[-1]) ** 2\n# for s in range(n_steps_each):\n# grad = scorenet(x_mod, labels)\n#\n# noise = torch.randn_like(x_mod)\n# grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n# noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n# x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)\n#\n# image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n# snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm\n# grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2\n#\n# if not final_only:\n# images.append(x_mod.to('cpu'))\n# if verbose:\n# print(\"level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}\".format(\n# c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))\n#\n# if denoise:\n# last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)\n# last_noise = last_noise.long()\n# x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)\n# images.append(x_mod.to('cpu'))\n#\n# if final_only:\n# return [x_mod.to('cpu')]\n# else:\n# return images\n#\n#\n# @torch.no_grad()\n# def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,\n# n_steps_each=100, step_lr=0.000008):\n# \"\"\"\n# Currently only good for 32x32 images. 
Assuming the right half is missing.\n# \"\"\"\n#\n# images = []\n#\n# refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)\n# refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)\n# x_mod = x_mod.view(-1, 3, image_size, image_size)\n# cols = image_size // 2\n# half_refer_image = refer_image[..., :cols]\n# with torch.no_grad():\n# for c, sigma in enumerate(sigmas):\n# labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n# labels = labels.long()\n# step_size = step_lr * (sigma / sigmas[-1]) ** 2\n#\n# for s in range(n_steps_each):\n# images.append(x_mod.to('cpu'))\n# corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma\n# x_mod[:, :, :, :cols] = corrupted_half_image\n# noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)\n# grad = scorenet(x_mod, labels)\n# x_mod = x_mod + step_size * grad + noise\n# print(\"class: {}, step_size: {}, mean {}, max {}\".format(c, step_size, grad.abs().mean(),\n# grad.abs().max()))\n#\n# return images\n#\n# @torch.no_grad()\n# def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,\n# final_only=False, verbose=False):\n# images = []\n#\n# n_rows = x_mod.shape[0]\n#\n# x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)\n# x_mod = x_mod.reshape(-1, *x_mod.shape[2:])\n#\n# for c, sigma in enumerate(sigmas):\n# labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n# labels = labels.long()\n# step_size = step_lr * (sigma / sigmas[-1]) ** 2\n# for s in range(n_steps_each):\n# grad = scorenet(x_mod, labels)\n#\n# noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n# device=x_mod.device)\n# noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n# device=x_mod.device)\n# angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)\n#\n# noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \\\n# noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]\n#\n# noise = noise.reshape(-1, *noise.shape[2:])\n# grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n# noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n# image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n#\n# x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)\n#\n# snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm\n#\n# if not final_only:\n# images.append(x_mod.to('cpu'))\n# if verbose:\n# print(\n# \"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}\".format(\n# c, step_size, image_norm.item(), grad_norm.item(), snr.item()))\n#\n#\n# if final_only:\n# return [x_mod.to('cpu')]\n# else:\n# return images\n\n\n\n\n\n"
] | [
[
"numpy.linspace",
"numpy.log"
]
] |
neomatrix369/embeddings-for-trees | [
"022fe30df4ece0162c16aaa42a59c93240eae180"
] | [
"utils/common.py"
] | [
"import os\nimport random\nfrom os import mkdir\nfrom os.path import exists\nfrom shutil import rmtree\nfrom tarfile import open as tar_open\nfrom typing import List\n\nimport dgl\nimport numpy as np\nimport torch\nfrom tqdm.auto import tqdm\n\nSOS = '<SOS>'\nEOS = '<EOS>'\nPAD = '<PAD>'\nUNK = '<UNK>'\nNAN = 'NAN'\nMETHOD_NAME = 'METHOD_NAME'\nSELF = '<SELF>'\n\n\ndef get_device() -> torch.device:\n # CUDA for PyTorch\n use_cuda = torch.cuda.is_available()\n device = torch.device('cuda:0' if use_cuda else 'cpu')\n return device\n\n\ndef fix_seed(seed: int = 7) -> None:\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n dgl.random.seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef extract_tar_gz(tar_path: str, extract_path: str) -> None:\n def tqdm_progress(members):\n extract_progress_bar = tqdm(total=len(list(members.getnames())))\n for member in members:\n extract_progress_bar.update()\n yield member\n extract_progress_bar.close()\n\n with tar_open(tar_path, 'r:gz') as tarball:\n tarball.extractall(extract_path, members=tqdm_progress(tarball))\n\n\ndef create_folder(path: str, is_clean: bool = True) -> None:\n if is_clean and exists(path):\n rmtree(path)\n if not exists(path):\n mkdir(path)\n\n\ndef segment_sizes_to_slices(sizes: List) -> List:\n cum_sums = np.cumsum(sizes)\n start_of_segments = np.append([0], cum_sums[:-1])\n return [slice(start, end) for start, end in zip(start_of_segments, cum_sums)]\n\n\ndef is_step_match(current_step: int, template: int, ignore_zero: bool = True) -> bool:\n match_template = template != -1 and current_step % template == 0\n if ignore_zero:\n return match_template and current_step != 0\n return match_template\n\n"
] | [
[
"torch.device",
"numpy.append",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.cumsum"
]
] |
Wilscos/recommender-systems-bibliometric-analysis | [
"bbba45340bf4d494278784ca1376e443f47d5012"
] | [
"utils.py"
] | [
"import os\nimport json\nimport numpy as np\nimport re\nimport pandas as pd\nfrom config import DATA_PATH, PDF_PATH\n\n\ndef read_data():\n if not os.path.isdir(DATA_PATH):\n os.mkdir(DATA_PATH)\n\n papers_df = pd.read_csv(f'{DATA_PATH}/df.csv')\n papers_df.drop('Unnamed: 0', axis=1, inplace=True)\n\n return papers_df\n\n\ndef obj_to_file(a_list, file_name):\n with open(os.path.join(DATA_PATH, f'{file_name}.txt'), 'w') as file:\n json.dump(a_list, file, ensure_ascii=False)\n\n\ndef get_pdf_from_txt_name(df):\n txt_names = df['file'].tolist()\n pdf_files = np.array(os.listdir(PDF_PATH))\n txt_idxes = [int(re.findall('\\d+', txt)[0]) - 1 for txt in txt_names]\n our_pdf_files = pdf_files[txt_idxes]\n # arxiv.org/abs/name_pdf.pdf\n df['pdf_file'] = our_pdf_files\n\n return df\n\n\ndef save_json(out_path: str, data: dict):\n with open(out_path, 'w') as f:\n json.dump(data, f, indent=6)\n\n\ndef from_json_to_list_tuples(json_path: str):\n json_file = open(json_path)\n # It returns a list of lists\n data = json.load(json_file)\n # Converting to a list of tuples\n data_list = [(pos[0], pos[1]) for pos in data]\n\n return data_list\n"
] | [
[
"pandas.read_csv"
]
] |
cnheider/ray | [
"9b33f3a7b7d799378decc2b7ef065e279599825d"
] | [
"python/ray/rllib/models/action_dist.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom ray.rllib.utils.reshaper import Reshaper\n\n\nclass ActionDistribution(object):\n \"\"\"The policy action distribution of an agent.\n\n Args:\n inputs (Tensor): The input vector to compute samples from.\n \"\"\"\n\n def __init__(self, inputs):\n self.inputs = inputs\n\n def logp(self, x):\n \"\"\"The log-likelihood of the action distribution.\"\"\"\n raise NotImplementedError\n\n def kl(self, other):\n \"\"\"The KL-divergence between two action distributions.\"\"\"\n raise NotImplementedError\n\n def entropy(self):\n \"\"\"The entroy of the action distribution.\"\"\"\n raise NotImplementedError\n\n def sample(self):\n \"\"\"Draw a sample from the action distribution.\"\"\"\n raise NotImplementedError\n\n\nclass Categorical(ActionDistribution):\n \"\"\"Categorical distribution for discrete action spaces.\"\"\"\n\n def logp(self, x):\n return -tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=self.inputs, labels=x)\n\n def entropy(self):\n a0 = self.inputs - tf.reduce_max(self.inputs, reduction_indices=[1],\n keep_dims=True)\n ea0 = tf.exp(a0)\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)\n p0 = ea0 / z0\n return tf.reduce_sum(p0 * (tf.log(z0) - a0), reduction_indices=[1])\n\n def kl(self, other):\n a0 = self.inputs - tf.reduce_max(self.inputs, reduction_indices=[1],\n keep_dims=True)\n a1 = other.inputs - tf.reduce_max(other.inputs, reduction_indices=[1],\n keep_dims=True)\n ea0 = tf.exp(a0)\n ea1 = tf.exp(a1)\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)\n z1 = tf.reduce_sum(ea1, reduction_indices=[1], keep_dims=True)\n p0 = ea0 / z0\n return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)),\n reduction_indices=[1])\n\n def sample(self):\n return tf.multinomial(self.inputs, 1)[0]\n\n\nclass DiagGaussian(ActionDistribution):\n \"\"\"Action distribution where each vector element is a gaussian.\n\n The first half of the input vector defines the gaussian means, and the\n second half the gaussian standard deviations.\n \"\"\"\n\n def __init__(self, inputs):\n ActionDistribution.__init__(self, inputs)\n mean, log_std = tf.split(inputs, 2, axis=1)\n self.mean = mean\n self.log_std = log_std\n self.std = tf.exp(log_std)\n\n def logp(self, x):\n return (-0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std),\n reduction_indices=[1]) -\n 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[1]) -\n tf.reduce_sum(self.log_std, reduction_indices=[1]))\n\n def kl(self, other):\n assert isinstance(other, DiagGaussian)\n return tf.reduce_sum(other.log_std - self.log_std +\n (tf.square(self.std) +\n tf.square(self.mean - other.mean)) /\n (2.0 * tf.square(other.std)) - 0.5,\n reduction_indices=[1])\n\n def entropy(self):\n return tf.reduce_sum(self.log_std + .5 * np.log(2.0 * np.pi * np.e),\n reduction_indices=[1])\n\n def sample(self):\n return self.mean + self.std * tf.random_normal(tf.shape(self.mean))\n\n\nclass Deterministic(ActionDistribution):\n \"\"\"Action distribution that returns the input values directly.\n\n This is similar to DiagGaussian with standard deviation zero.\n \"\"\"\n\n def sample(self):\n return self.inputs\n\n\nclass MultiActionDistribution(ActionDistribution):\n \"\"\"Action distribution that operates for list of actions.\n\n Args:\n inputs (Tensor list): A list of tensors from which to compute samples.\n \"\"\"\n def __init__(self, inputs, action_space, 
child_distributions):\n # you actually have to instantiate the child distributions\n self.reshaper = Reshaper(action_space.spaces)\n split_inputs = self.reshaper.split_tensor(inputs)\n child_list = []\n for i, distribution in enumerate(child_distributions):\n child_list.append(distribution(split_inputs[i]))\n self.child_distributions = child_list\n\n def logp(self, x):\n \"\"\"The log-likelihood of the action distribution.\"\"\"\n split_list = self.reshaper.split_tensor(x)\n for i, distribution in enumerate(self.child_distributions):\n # Remove extra categorical dimension\n if isinstance(distribution, Categorical):\n split_list[i] = tf.squeeze(split_list[i], axis=-1)\n log_list = np.asarray([distribution.logp(split_x) for\n distribution, split_x in\n zip(self.child_distributions, split_list)])\n return np.sum(log_list)\n\n def kl(self, other):\n \"\"\"The KL-divergence between two action distributions.\"\"\"\n kl_list = np.asarray([distribution.kl(other_distribution) for\n distribution, other_distribution in\n zip(self.child_distributions,\n other.child_distributions)])\n return np.sum(kl_list)\n\n def entropy(self):\n \"\"\"The entropy of the action distribution.\"\"\"\n entropy_list = np.array([s.entropy() for s in\n self.child_distributions])\n return np.sum(entropy_list)\n\n def sample(self):\n \"\"\"Draw a sample from the action distribution.\"\"\"\n return [[s.sample() for s in self.child_distributions]]\n"
] | [
[
"tensorflow.exp",
"tensorflow.shape",
"numpy.log",
"numpy.sum",
"tensorflow.multinomial",
"tensorflow.reduce_max",
"tensorflow.log",
"tensorflow.squeeze",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.square",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
]
] |
future-xy/dlrm | [
"aae60757350c467c63893229e149223b6996400e"
] | [
"torchrec_dlrm/data/dlrm_dataloader.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport os\nfrom typing import List\n\nfrom torch import distributed as dist\nfrom torch.utils.data import DataLoader\nfrom torchrec.datasets.criteo import (\n CAT_FEATURE_COUNT,\n DEFAULT_CAT_NAMES,\n DEFAULT_INT_NAMES,\n DAYS,\n InMemoryBinaryCriteoIterDataPipe,\n)\nfrom torchrec.datasets.random import RandomRecDataset\n\nSTAGES = [\"train\", \"val\", \"test\"]\n\n\ndef _get_random_dataloader(\n args: argparse.Namespace,\n) -> DataLoader:\n return DataLoader(\n RandomRecDataset(\n keys=DEFAULT_CAT_NAMES,\n batch_size=args.batch_size,\n hash_size=args.num_embeddings,\n hash_sizes=args.num_embeddings_per_feature\n if hasattr(args, \"num_embeddings_per_feature\")\n else None,\n manual_seed=args.seed if hasattr(args, \"seed\") else None,\n ids_per_feature=1,\n num_dense=len(DEFAULT_INT_NAMES),\n ),\n batch_size=None,\n batch_sampler=None,\n pin_memory=args.pin_memory,\n num_workers=0,\n )\n\n\ndef _get_in_memory_dataloader(\n args: argparse.Namespace,\n stage: str,\n) -> DataLoader:\n files = os.listdir(args.in_memory_binary_criteo_path)\n\n def is_final_day(s: str) -> bool:\n return f\"day_{DAYS - 1}\" in s\n\n if stage == \"train\":\n # Train set gets all data except from the final day.\n files = list(filter(lambda s: not is_final_day(s), files))\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n # Validation set gets the first half of the final day's samples. Test set get\n # the other half.\n files = list(filter(is_final_day, files))\n rank = (\n dist.get_rank()\n if stage == \"val\"\n else dist.get_rank() + dist.get_world_size()\n )\n world_size = dist.get_world_size() * 2\n\n stage_files: List[List[str]] = [\n sorted(\n map(\n lambda x: os.path.join(args.in_memory_binary_criteo_path, x),\n filter(lambda s: kind in s, files),\n )\n )\n for kind in [\"dense\", \"sparse\", \"labels\"]\n ]\n dataloader = DataLoader(\n InMemoryBinaryCriteoIterDataPipe(\n *stage_files, # pyre-ignore[6]\n batch_size=args.batch_size,\n rank=rank,\n world_size=world_size,\n shuffle_batches=args.shuffle_batches,\n hashes=args.num_embeddings_per_feature\n if args.num_embeddings is None\n else ([args.num_embeddings] * CAT_FEATURE_COUNT),\n ),\n batch_size=None,\n pin_memory=args.pin_memory,\n collate_fn=lambda x: x,\n )\n return dataloader\n\n\ndef get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader:\n \"\"\"\n Gets desired dataloader from dlrm_main command line options. Currently, this\n function is able to return either a DataLoader wrapped around a RandomRecDataset or\n a Dataloader wrapped around an InMemoryBinaryCriteoIterDataPipe.\n\n Args:\n args (argparse.Namespace): Command line options supplied to dlrm_main.py's main\n function.\n backend (str): \"nccl\" or \"gloo\".\n stage (str): \"train\", \"val\", or \"test\".\n\n Returns:\n dataloader (DataLoader): PyTorch dataloader for the specified options.\n\n \"\"\"\n stage = stage.lower()\n if stage not in STAGES:\n raise ValueError(f\"Supplied stage was {stage}. 
Must be one of {STAGES}.\")\n\n args.pin_memory = (\n (backend == \"nccl\") if not hasattr(args, \"pin_memory\") else args.pin_memory\n )\n\n if (\n not hasattr(args, \"in_memory_binary_criteo_path\")\n or args.in_memory_binary_criteo_path is None\n ):\n return _get_random_dataloader(args)\n else:\n return _get_in_memory_dataloader(args, stage)\n"
] | [
[
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
]
] |
nismod/microsimulation | [
"37ce2843f10b83a8e7a225c801cec83b85e6e0d0"
] | [
"scripts/check.py"
] | [
"\nimport pandas as pd\n\nyears = range(2011,2051)\n\nlad = \"E07000041\"\nprojs = [\"ppp\", \"hhh\"]\n\nfor year in years:\n filep = \"./data/ssm_\" + lad + \"_MSOA11_\" + projs[0] + \"_\" + str(year) + \".csv\"\n fileh = \"./data/ssm_\" + lad + \"_MSOA11_\" + projs[1] + \"_\" + str(year) + \".csv\"\n dfp = pd.read_csv(filep)\n dfh = pd.read_csv(fileh)\n print(year, len(dfp), len(dfh))"
] | [
[
"pandas.read_csv"
]
] |
RSRamKumar/Master_thesis | [
"3704862da452cffa33cee1ed4e40fd835c28eeb5"
] | [
"python scripts/drugbank_to_chembl_english_names.py"
] | [
"\r\n#!/usr/bin/env python3\r\n\r\n# Import relevant libraries for HTTP request and JSON formatting\r\nimport requests\r\nimport json\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\n\r\ndef scraping_drug_names_from_DrugBank_website(drugbank_id):\r\n names_scrapped = []\r\n chembl_scrapped = []\r\n response = requests.get(\"https://go.drugbank.com/drugs/{}\".format(drugbank_id)).content\r\n soup = BeautifulSoup(response, \"html.parser\")\r\n # drug_name = soup.title.text.split(\":\")[0]\r\n drug_name = soup.find(\"dd\", class_=\"col-xl-4 col-md-9 col-sm-8 pr-xl-2\").text\r\n names_scrapped.append(drug_name)\r\n chembl_finds = []\r\n chembl_soup = soup.find_all(\"dd\", class_=\"col-md-8 col-sm-7\")\r\n\r\n for i in chembl_soup:\r\n id = re.findall(r\"CHEMBL\\w+\", i.text)\r\n if len(id) >= 1:\r\n chembl_finds.append(id)\r\n\r\n flat_list = flat_list = [item for sublist in chembl_finds for item in sublist]\r\n\r\n if len(flat_list) == 0:\r\n chembl_scrapped.append(np.nan)\r\n elif len(flat_list) != 0:\r\n chembl_scrapped.append((\",\".join(flat_list)))\r\n\r\n return names_scrapped,chembl_scrapped\r\n\r\n# print(scraping_drug_names_from_DrugBank_website(\"DB00207\"))\r\n# print(scraping_drug_names_from_DrugBank_website(\"DB14972\"))\r\n# print(scraping_drug_names_from_DrugBank_website(\"DB01370\"))\r\n# print(scraping_drug_names_from_DrugBank_website(\"DB12182\"))\r\n\r\n##function call\r\nct_df = pd.read_csv(r\"C:\\Users\\rsurulinathan\\Downloads\\selecttrial_idd.csv\",error_bad_lines=False)\r\nct_df['drugs'] = ct_df['drugs'].str[1:-1].str.split(',').tolist()\r\nct_df['drugs'] = ct_df['drugs'].apply(lambda x: [i.strip() for i in x] if type(x)== list else x)\r\n\r\ndrug_list = set()\r\nfor index, entries in enumerate(ct_df['drugs'].values):\r\n if type(entries) == list:\r\n for drug in entries:\r\n drug_list.add(drug)\r\n\r\n\r\nprint(len( (list(sorted(drug_list)))))\r\n\r\n\r\nf=open(\"drugbank_chembl_english.csv\",'w',encoding=\"utf-8\")\r\nf1=open(\"drug_error.txt\",\"w\")\r\nfor drug in list(sorted(drug_list)):\r\n f1.write(\"The drug for the scrappig process is {}\\n\".format(drug))\r\n english, chembl = scraping_drug_names_from_DrugBank_website(drug)\r\n f.write(\"{},{},{}\\n\".format(drug,english,chembl))\r\n\r\n\r\n#https://stackoverflow.com/questions/27092833/unicodeencodeerror-charmap-codec-cant-encode-characters"
] | [
[
"pandas.read_csv"
]
] |
CheungBH/mmpose | [
"d3cfde961a0ffe60ff018dfb6999ad84fa9818c5"
] | [
"mmpose/models/detectors/bottom_up.py"
] | [
"import math\nimport warnings\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.image import imwrite\nfrom mmcv.visualization.image import imshow\n\nfrom mmpose.core.evaluation import (aggregate_results, get_group_preds,\n get_multi_stage_outputs)\nfrom mmpose.core.post_processing.group import HeatmapParser\nfrom .. import builder\nfrom ..registry import POSENETS\nfrom .base import BasePose\n\ntry:\n from mmcv.runner import auto_fp16\nexcept ImportError:\n warnings.warn('auto_fp16 from mmpose will be deprecated from v0.15.0'\n 'Please install mmcv>=1.1.4')\n from mmpose.core import auto_fp16\n\n\[email protected]_module()\nclass BottomUp(BasePose):\n \"\"\"Bottom-up pose detectors.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n keypoint_head (dict): Keypoint head to process feature.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path to the pretrained models.\n loss_pose (None): Deprecated arguments. Please use\n `loss_keypoint` for heads instead.\n \"\"\"\n\n def __init__(self,\n backbone,\n keypoint_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n loss_pose=None):\n super().__init__()\n self.fp16_enabled = False\n\n self.backbone = builder.build_backbone(backbone)\n\n if keypoint_head is not None:\n\n if 'loss_keypoint' not in keypoint_head and loss_pose is not None:\n warnings.warn(\n '`loss_pose` for BottomUp is deprecated, '\n 'use `loss_keypoint` for heads instead. See '\n 'https://github.com/open-mmlab/mmpose/pull/382'\n ' for more information.', DeprecationWarning)\n keypoint_head['loss_keypoint'] = loss_pose\n\n self.keypoint_head = builder.build_head(keypoint_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.use_udp = test_cfg.get('use_udp', False)\n self.parser = HeatmapParser(self.test_cfg)\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_keypoint(self):\n \"\"\"Check if has keypoint_head.\"\"\"\n return hasattr(self, 'keypoint_head')\n\n def init_weights(self, pretrained=None):\n \"\"\"Weight initialization for model.\"\"\"\n self.backbone.init_weights(pretrained)\n if self.with_keypoint:\n self.keypoint_head.init_weights()\n\n @auto_fp16(apply_to=('img', ))\n def forward(self,\n img=None,\n targets=None,\n masks=None,\n joints=None,\n img_metas=None,\n return_loss=True,\n return_heatmap=False,\n **kwargs):\n \"\"\"Calls either forward_train or forward_test depending on whether\n return_loss is True.\n Note:\n batch_size: N\n num_keypoints: K\n num_img_channel: C\n img_width: imgW\n img_height: imgH\n heatmaps weight: W\n heatmaps height: H\n max_num_people: M\n Args:\n img(torch.Tensor[NxCximgHximgW]): Input image.\n targets(List(torch.Tensor[NxKxHxW])): Multi-scale target heatmaps.\n masks(List(torch.Tensor[NxHxW])): Masks of multi-scale target\n heatmaps\n joints(List(torch.Tensor[NxMxKx2])): Joints of multi-scale target\n heatmaps for ae loss\n img_metas(dict):Information about val&test\n By default this includes:\n - \"image_file\": image path\n - \"aug_data\": input\n - \"test_scale_factor\": test scale factor\n - \"base_size\": base size of input\n - \"center\": center of image\n - \"scale\": scale of image\n - \"flip_index\": flip index of keypoints\n\n return loss(bool): Option to 'return_loss'. 
'return_loss=True' for\n training, 'return_loss=False' for validation & test\n return_heatmap (bool) : Option to return heatmap.\n\n Returns:\n dict|tuple: if 'return_loss' is true, then return losses.\n Otherwise, return predicted poses, scores, image\n paths and heatmaps.\n \"\"\"\n\n if return_loss:\n return self.forward_train(img, targets, masks, joints, img_metas,\n **kwargs)\n return self.forward_test(\n img, img_metas, return_heatmap=return_heatmap, **kwargs)\n\n def forward_train(self, img, targets, masks, joints, img_metas, **kwargs):\n \"\"\"Forward the bottom-up model and calculate the loss.\n\n Note:\n batch_size: N\n num_keypoints: K\n num_img_channel: C\n img_width: imgW\n img_height: imgH\n heatmaps weight: W\n heatmaps height: H\n max_num_people: M\n\n Args:\n img(torch.Tensor[NxCximgHximgW]): Input image.\n targets(List(torch.Tensor[NxKxHxW])): Multi-scale target heatmaps.\n masks(List(torch.Tensor[NxHxW])): Masks of multi-scale target\n heatmaps\n joints(List(torch.Tensor[NxMxKx2])): Joints of multi-scale target\n heatmaps for ae loss\n img_metas(dict):Information about val&test\n By default this includes:\n - \"image_file\": image path\n - \"aug_data\": input\n - \"test_scale_factor\": test scale factor\n - \"base_size\": base size of input\n - \"center\": center of image\n - \"scale\": scale of image\n - \"flip_index\": flip index of keypoints\n\n Returns:\n dict: The total loss for bottom-up\n \"\"\"\n\n output = self.backbone(img)\n\n if self.with_keypoint:\n output = self.keypoint_head(output)\n\n # if return loss\n losses = dict()\n if self.with_keypoint:\n keypoint_losses = self.keypoint_head.get_loss(\n output, targets, masks, joints)\n losses.update(keypoint_losses)\n\n return losses\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network FLOPs.\n\n See ``tools/get_flops.py``.\n\n Args:\n img (torch.Tensor): Input image.\n\n Returns:\n Tensor: Outputs.\n \"\"\"\n output = self.backbone(img)\n if self.with_keypoint:\n output = self.keypoint_head(output)\n return output\n\n def forward_test(self, img, img_metas, return_heatmap=False, **kwargs):\n \"\"\"Inference the bottom-up model.\n\n Note:\n Batchsize = N (currently support batchsize = 1)\n num_img_channel: C\n img_width: imgW\n img_height: imgH\n\n Args:\n flip_index (List(int)):\n aug_data (List(Tensor[NxCximgHximgW])): Multi-scale image\n test_scale_factor (List(float)): Multi-scale factor\n base_size (Tuple(int)): Base size of image when scale is 1\n center (np.ndarray): center of image\n scale (np.ndarray): the scale of image\n \"\"\"\n assert img.size(0) == 1\n assert len(img_metas) == 1\n\n img_metas = img_metas[0]\n\n aug_data = img_metas['aug_data']\n\n test_scale_factor = img_metas['test_scale_factor']\n base_size = img_metas['base_size']\n center = img_metas['center']\n scale = img_metas['scale']\n\n result = {}\n\n aggregated_heatmaps = None\n tags_list = []\n for idx, s in enumerate(sorted(test_scale_factor, reverse=True)):\n image_resized = aug_data[idx].to(img.device)\n\n features = self.backbone(image_resized)\n if self.with_keypoint:\n outputs = self.keypoint_head(features)\n\n if self.test_cfg.get('flip_test', True):\n # use flip test\n features_flipped = self.backbone(\n torch.flip(image_resized, [3]))\n if self.with_keypoint:\n outputs_flipped = self.keypoint_head(features_flipped)\n else:\n outputs_flipped = None\n\n _, heatmaps, tags = get_multi_stage_outputs(\n outputs,\n outputs_flipped,\n self.test_cfg['num_joints'],\n self.test_cfg['with_heatmaps'],\n 
self.test_cfg['with_ae'],\n self.test_cfg['tag_per_joint'],\n img_metas['flip_index'],\n self.test_cfg['project2image'],\n base_size,\n align_corners=self.use_udp)\n\n aggregated_heatmaps, tags_list = aggregate_results(\n s,\n aggregated_heatmaps,\n tags_list,\n heatmaps,\n tags,\n test_scale_factor,\n self.test_cfg['project2image'],\n self.test_cfg.get('flip_test', True),\n align_corners=self.use_udp)\n\n # average heatmaps of different scales\n aggregated_heatmaps = aggregated_heatmaps / float(\n len(test_scale_factor))\n tags = torch.cat(tags_list, dim=4)\n\n # perform grouping\n grouped, scores = self.parser.parse(aggregated_heatmaps, tags,\n self.test_cfg['adjust'],\n self.test_cfg['refine'])\n\n preds = get_group_preds(\n grouped,\n center,\n scale, [aggregated_heatmaps.size(3),\n aggregated_heatmaps.size(2)],\n use_udp=self.use_udp)\n\n image_paths = []\n image_paths.append(img_metas['image_file'])\n\n if return_heatmap:\n output_heatmap = aggregated_heatmaps.detach().cpu().numpy()\n else:\n output_heatmap = None\n\n result['preds'] = preds\n result['scores'] = scores\n result['image_paths'] = image_paths\n result['output_heatmap'] = output_heatmap\n\n return result\n\n def show_result(self,\n img,\n result,\n skeleton=None,\n kpt_score_thr=0.3,\n bbox_color=None,\n pose_kpt_color=None,\n pose_limb_color=None,\n radius=4,\n thickness=1,\n font_scale=0.5,\n win_name='',\n show=False,\n show_keypoint_weight=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (list[dict]): The results to draw over `img`\n (bbox_result, pose_result).\n skeleton (list[list]): The connection of keypoints.\n kpt_score_thr (float, optional): Minimum score of keypoints\n to be shown. Default: 0.3.\n pose_kpt_color (np.array[Nx3]`): Color of N keypoints.\n If None, do not draw keypoints.\n pose_limb_color (np.array[Mx3]): Color of M limbs.\n If None, do not draw limbs.\n radius (int): Radius of circles.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n win_name (str): The window name.\n show (bool): Whether to show the image. 
Default: False.\n show_keypoint_weight (bool): Whether to change the transparency\n using the predicted confidence scores of keypoints.\n wait_time (int): Value of waitKey param.\n Default: 0.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n Tensor: Visualized image only if not `show` or `out_file`\n \"\"\"\n\n img = mmcv.imread(img)\n img = img.copy()\n img_h, img_w, _ = img.shape\n\n pose_result = []\n for res in result:\n pose_result.append(res['keypoints'])\n\n for _, kpts in enumerate(pose_result):\n # draw each point on image\n if pose_kpt_color is not None:\n assert len(pose_kpt_color) == len(kpts)\n for kid, kpt in enumerate(kpts):\n x_coord, y_coord, kpt_score = int(kpt[0]), int(\n kpt[1]), kpt[2]\n if kpt_score > kpt_score_thr:\n if show_keypoint_weight:\n img_copy = img.copy()\n r, g, b = pose_kpt_color[kid]\n cv2.circle(img_copy, (int(x_coord), int(y_coord)),\n radius, (int(r), int(g), int(b)), -1)\n transparency = max(0, min(1, kpt_score))\n cv2.addWeighted(\n img_copy,\n transparency,\n img,\n 1 - transparency,\n 0,\n dst=img)\n else:\n r, g, b = pose_kpt_color[kid]\n cv2.circle(img, (int(x_coord), int(y_coord)),\n radius, (int(r), int(g), int(b)), -1)\n\n # draw limbs\n if skeleton is not None and pose_limb_color is not None:\n assert len(pose_limb_color) == len(skeleton)\n for sk_id, sk in enumerate(skeleton):\n pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1, 1]))\n pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1, 1]))\n if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0\n and pos1[1] < img_h and pos2[0] > 0\n and pos2[0] < img_w and pos2[1] > 0\n and pos2[1] < img_h\n and kpts[sk[0] - 1, 2] > kpt_score_thr\n and kpts[sk[1] - 1, 2] > kpt_score_thr):\n r, g, b = pose_limb_color[sk_id]\n if show_keypoint_weight:\n img_copy = img.copy()\n X = (pos1[0], pos2[0])\n Y = (pos1[1], pos2[1])\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5\n angle = math.degrees(\n math.atan2(Y[0] - Y[1], X[0] - X[1]))\n stickwidth = 2\n polygon = cv2.ellipse2Poly(\n (int(mX), int(mY)),\n (int(length / 2), int(stickwidth)), int(angle),\n 0, 360, 1)\n cv2.fillConvexPoly(img_copy, polygon,\n (int(r), int(g), int(b)))\n transparency = max(\n 0,\n min(\n 1, 0.5 *\n (kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))\n cv2.addWeighted(\n img_copy,\n transparency,\n img,\n 1 - transparency,\n 0,\n dst=img)\n else:\n cv2.line(\n img,\n pos1,\n pos2, (int(r), int(g), int(b)),\n thickness=thickness)\n\n if show:\n imshow(img, win_name, wait_time)\n\n if out_file is not None:\n imwrite(img, out_file)\n\n return img\n"
] | [
[
"torch.cat",
"numpy.mean",
"torch.flip"
]
] |
muyangren1234/Task-oriented_Sensing_Quality | [
"af2c35cd2315fd76acdbf91ec8b03351bc4bd9b9"
] | [
"ball_recognition/classification/combine_csv.py"
] | [
"import csv\nimport os\nimport numpy as np\nimport random\n#import requests\n\ndeployment_name=[\"Garage\", \"Aisle_rug\", \"Bridge\",\"Hall\", \"Aisle\", \"Livingroom_rug\",\"Livingroom_base\",\"Garage_k\",\"Outdoor\"]\ndeployment_name=[\"Lab_beam\",\"Aisle_beam\"]\nsensor_list =[1,2,3,4,5,6]\nloc_n = 5\n\nwrite_csv_name ='ext_result_' + str(loc_n) + '.csv'\nwr_csv_f = open(write_csv_name, 'w',newline='')\nwr_csv = csv.writer(wr_csv_f)\nwr_csv.writerow(deployment_name)\n\n\nfor sensor in sensor_list:\n sensor_result= np.zeros([9, len(deployment_name)])\n dep_count = 0\n for dep in deployment_name:\n file_name='./CMs/' + str(dep) + '_'+ str(sensor)+'/' +str(dep) + '_' +str(sensor) + '_' + str(loc_n) + '.csv'\n csv_bar = open(file_name, 'r')\n reader = csv.reader(csv_bar)\n tmp_result=np.zeros(9)\n count =0\n for item in reader:\n if reader.line_num ==1:\n continue\n tmp_result[count] = item[0]\n count = count +1\n \n csv_bar.close()\n sensor_result[:,dep_count] = tmp_result\n dep_count = dep_count+1\n #print(tmp_result)\n print(sensor_result)\n for kk in range(9):\n row_re = sensor_result[kk,:]\n wr_csv.writerow(row_re)\n wr_csv.writerow('\\n')\n print('\\n\\n')\nwr_csv_f.close()"
] | [
[
"numpy.zeros"
]
] |
NguyenVanVu0499/baitap_bigdata | [
"bab1359e63264d9dfad90c62babff541ad3a2bd5"
] | [
"services/upload/src/char_classification/data_provider.py"
] | [
"import tensorflow.keras as keras\nimport numpy as np\n\nfrom services.upload.src.char_classification import data_utils\n\n\nclass Datasets(object):\n def __init__(self):\n self.all_data = []\n\n # Input data\n self.digits_data = data_utils.get_digits_data('./data/digits.npy')\n self.alphas_data = data_utils.get_alphas_data('./data/alphas.npy')\n\n # Preprocess\n self.convert_data_format()\n\n def gen(self):\n np.random.shuffle(self.all_data)\n images = []\n labels = []\n\n for i in range(len(self.all_data)):\n image, label = self.all_data[i]\n images.append(image)\n labels.append(label)\n\n labels = keras.utils.to_categorical(labels, num_classes=32)\n return images, labels\n\n def convert_data_format(self):\n # Digits data\n for i in range(len(self.digits_data)):\n image = self.digits_data[i][0]\n label = self.digits_data[i][1]\n self.all_data.append((image, label))\n\n # Alpha data\n nb_alphas_data = len(self.alphas_data)\n for i in range(nb_alphas_data * 8):\n image = self.alphas_data[i % nb_alphas_data][0]\n label = self.alphas_data[i % nb_alphas_data][1]\n self.all_data.append((image, label))\n"
] | [
[
"tensorflow.keras.utils.to_categorical",
"numpy.random.shuffle"
]
] |
KawashiroNitori/blind-watermark | [
"38270b9a0cdf574935254f3182a155344eb35b1f"
] | [
"decode.py"
] | [
"# coding=utf-8\r\nimport cv2\r\nimport numpy as np\r\nimport random\r\nimport os\r\nfrom argparse import ArgumentParser\r\nALPHA = 5\r\n\r\n\r\ndef build_parser():\r\n parser = ArgumentParser()\r\n parser.add_argument('--original', dest='ori', required=True)\r\n parser.add_argument('--image', dest='img', required=True)\r\n parser.add_argument('--result', dest='res', required=True)\r\n parser.add_argument('--alpha', dest='alpha', default=ALPHA)\r\n return parser\r\n\r\n\r\ndef main():\r\n parser = build_parser()\r\n options = parser.parse_args()\r\n ori = options.ori\r\n img = options.img\r\n res = options.res\r\n alpha = float(options.alpha)\r\n if not os.path.isfile(ori):\r\n parser.error(\"original image %s does not exist.\" % ori)\r\n if not os.path.isfile(img):\r\n parser.error(\"image %s does not exist.\" % img)\r\n decode(ori, img, res, alpha)\r\n\r\n\r\ndef decode(ori_path, img_path, res_path, alpha):\r\n ori = cv2.imread(ori_path)\r\n img = cv2.imread(img_path)\r\n ori_f = np.fft.fft2(ori)\r\n img_f = np.fft.fft2(img)\r\n height, width = ori.shape[0], ori.shape[1]\r\n watermark = (ori_f - img_f) / alpha\r\n watermark = np.real(watermark)\r\n res = np.zeros(watermark.shape)\r\n random.seed(height + width)\r\n x = range(height / 2)\r\n y = range(width)\r\n random.shuffle(x)\r\n random.shuffle(y)\r\n for i in range(height / 2):\r\n for j in range(width):\r\n res[x[i]][y[j]] = watermark[i][j]\r\n cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100])\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"numpy.real",
"numpy.fft.fft2",
"numpy.zeros"
]
] |
tforgaard/pytorch_geometric_temporal | [
"d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8"
] | [
"torch_geometric_temporal/nn/attention/mtgnn.py"
] | [
"from __future__ import division\n\nimport numbers\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\n\nclass Linear(nn.Module):\n r\"\"\"An implementation of the linear layer, conducting 2D convolution.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n c_in (int): Number of input channels.\n c_out (int): Number of output channels.\n bias (bool, optional): Whether to have bias. Default: True.\n \"\"\"\n\n def __init__(self, c_in: int, c_out: int, bias: bool = True):\n super(Linear, self).__init__()\n self._mlp = torch.nn.Conv2d(\n c_in, c_out, kernel_size=(1, 1), padding=(0, 0), stride=(1, 1), bias=bias\n )\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def forward(self, X: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of the linear layer.\n\n Arg types:\n * **X** (Pytorch Float Tensor) - Input tensor, with shape (batch_size, c_in, num_nodes, seq_len).\n\n Return types:\n * **X** (PyTorch Float Tensor) - Output tensor, with shape (batch_size, c_out, num_nodes, seq_len).\n \"\"\"\n return self._mlp(X)\n\n\nclass MixProp(nn.Module):\n r\"\"\"An implementation of the dynatic mix-hop propagation layer.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n c_in (int): Number of input channels.\n c_out (int): Number of output channels.\n gdep (int): Depth of graph convolution.\n dropout (float): Dropout rate.\n alpha (float): Ratio of retaining the root nodes's original states, a value between 0 and 1.\n \"\"\"\n\n def __init__(self, c_in: int, c_out: int, gdep: int, dropout: float, alpha: float):\n super(MixProp, self).__init__()\n self._mlp = Linear((gdep + 1) * c_in, c_out)\n self._gdep = gdep\n self._dropout = dropout\n self._alpha = alpha\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def forward(self, X: torch.FloatTensor, A: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of mix-hop propagation.\n\n Arg types:\n * **X** (Pytorch Float Tensor) - Input feature Tensor, with shape (batch_size, c_in, num_nodes, seq_len).\n * **A** (PyTorch Float Tensor) - Adjacency matrix, with shape (num_nodes, num_nodes).\n\n Return types:\n * **H_0** (PyTorch Float Tensor) - Hidden representation for all nodes, with shape (batch_size, c_out, num_nodes, seq_len).\n \"\"\"\n A = A + torch.eye(A.size(0)).to(X.device)\n d = A.sum(1)\n H = X\n H_0 = X\n A = A / d.view(-1, 1)\n for _ in range(self._gdep):\n H = self._alpha * X + (1 - self._alpha) * torch.einsum(\n \"ncwl,vw->ncvl\", (H, A)\n )\n H_0 = torch.cat((H_0, H), dim=1)\n H_0 = self._mlp(H_0)\n return H_0\n\n\nclass DilatedInception(nn.Module):\n r\"\"\"An implementation of the dilated inception layer.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n c_in (int): Number of input channels.\n c_out (int): Number of output channels.\n kernel_set (list of int): List of kernel sizes.\n dilated_factor (int, 
optional): Dilation factor.\n \"\"\"\n\n def __init__(self, c_in: int, c_out: int, kernel_set: list, dilation_factor: int):\n super(DilatedInception, self).__init__()\n self._time_conv = nn.ModuleList()\n self._kernel_set = kernel_set\n c_out = int(c_out / len(self._kernel_set))\n for kern in self._kernel_set:\n self._time_conv.append(\n nn.Conv2d(c_in, c_out, (1, kern), dilation=(1, dilation_factor))\n )\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def forward(self, X_in: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of dilated inception.\n\n Arg types:\n * **X_in** (Pytorch Float Tensor) - Input feature Tensor, with shape (batch_size, c_in, num_nodes, seq_len).\n\n Return types:\n * **X** (PyTorch Float Tensor) - Hidden representation for all nodes,\n with shape (batch_size, c_out, num_nodes, seq_len-6).\n \"\"\"\n X = []\n for i in range(len(self._kernel_set)):\n X.append(self._time_conv[i](X_in))\n for i in range(len(self._kernel_set)):\n X[i] = X[i][..., -X[-1].size(3) :]\n X = torch.cat(X, dim=1)\n return X\n\n\nclass GraphConstructor(nn.Module):\n r\"\"\"An implementation of the graph learning layer to construct an adjacency matrix.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n nnodes (int): Number of nodes in the graph.\n k (int): Number of largest values to consider in constructing the neighbourhood of a node (pick the \"nearest\" k nodes).\n dim (int): Dimension of the node embedding.\n alpha (float, optional): Tanh alpha for generating adjacency matrix, alpha controls the saturation rate\n xd (int, optional): Static feature dimension, default None.\n \"\"\"\n\n def __init__(\n self, nnodes: int, k: int, dim: int, alpha: float, xd: Optional[int] = None\n ):\n super(GraphConstructor, self).__init__()\n if xd is not None:\n self._static_feature_dim = xd\n self._linear1 = nn.Linear(xd, dim)\n self._linear2 = nn.Linear(xd, dim)\n else:\n self._embedding1 = nn.Embedding(nnodes, dim)\n self._embedding2 = nn.Embedding(nnodes, dim)\n self._linear1 = nn.Linear(dim, dim)\n self._linear2 = nn.Linear(dim, dim)\n\n self._k = k\n self._alpha = alpha\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def forward(\n self, idx: torch.LongTensor, FE: Optional[torch.FloatTensor] = None\n ) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass to construct an adjacency matrix from node embeddings.\n\n Arg types:\n * **idx** (Pytorch Long Tensor) - Input indices, a permutation of the number of nodes, default None (no permutation).\n * **FE** (Pytorch Float Tensor, optional) - Static feature, default None.\n Return types:\n * **A** (PyTorch Float Tensor) - Adjacency matrix constructed from node embeddings.\n \"\"\"\n\n if FE is None:\n nodevec1 = self._embedding1(idx)\n nodevec2 = self._embedding2(idx)\n else:\n assert FE.shape[1] == self._static_feature_dim\n nodevec1 = FE[idx, :]\n nodevec2 = nodevec1\n\n nodevec1 = torch.tanh(self._alpha * self._linear1(nodevec1))\n nodevec2 = torch.tanh(self._alpha * self._linear2(nodevec2))\n\n a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) - torch.mm(\n nodevec2, nodevec1.transpose(1, 0)\n )\n A = F.relu(torch.tanh(self._alpha * a))\n mask = 
torch.zeros(idx.size(0), idx.size(0)).to(A.device)\n mask.fill_(float(\"0\"))\n s1, t1 = A.topk(self._k, 1)\n mask.scatter_(1, t1, s1.fill_(1))\n A = A * mask\n return A\n\n\nclass LayerNormalization(nn.Module):\n __constants__ = [\"normalized_shape\", \"weight\", \"bias\", \"eps\", \"elementwise_affine\"]\n r\"\"\"An implementation of the layer normalization layer.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\" \n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n normalized_shape (int): Input shape from an expected input of size.\n eps (float, optional): Value added to the denominator for numerical stability. Default: 1e-5.\n elementwise_affine (bool, optional): Whether to conduct elementwise affine transformation or not. Default: True.\n \"\"\"\n\n def __init__(\n self, normalized_shape: int, eps: float = 1e-5, elementwise_affine: bool = True\n ):\n super(LayerNormalization, self).__init__()\n self._normalized_shape = tuple(normalized_shape)\n self._eps = eps\n self._elementwise_affine = elementwise_affine\n if self._elementwise_affine:\n self._weight = nn.Parameter(torch.Tensor(*normalized_shape))\n self._bias = nn.Parameter(torch.Tensor(*normalized_shape))\n else:\n self.register_parameter(\"_weight\", None)\n self.register_parameter(\"_bias\", None)\n self._reset_parameters()\n\n def _reset_parameters(self):\n if self._elementwise_affine:\n init.ones_(self._weight)\n init.zeros_(self._bias)\n\n def forward(self, X: torch.FloatTensor, idx: torch.LongTensor) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of layer normalization.\n\n Arg types:\n * **X** (Pytorch Float Tensor) - Input tensor,\n with shape (batch_size, feature_dim, num_nodes, seq_len).\n * **idx** (Pytorch Long Tensor) - Input indices.\n\n Return types:\n * **X** (PyTorch Float Tensor) - Output tensor,\n with shape (batch_size, feature_dim, num_nodes, seq_len).\n \"\"\"\n if self._elementwise_affine:\n return F.layer_norm(\n X,\n tuple(X.shape[1:]),\n self._weight[:, idx, :],\n self._bias[:, idx, :],\n self._eps,\n )\n else:\n return F.layer_norm(\n X, tuple(X.shape[1:]), self._weight, self._bias, self._eps\n )\n\n\nclass MTGNNLayer(nn.Module):\n r\"\"\"An implementation of the MTGNN layer.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n dilation_exponential (int): Dilation exponential.\n rf_size_i (int): Size of receptive field.\n kernel_size (int): Size of kernel for convolution, to calculate receptive field size.\n j (int): Iteration index.\n residual_channels (int): Residual channels.\n conv_channels (int): Convolution channels.\n skip_channels (int): Skip channels.\n kernel_set (list of int): List of kernel sizes.\n new_dilation (int): Dilation.\n layer_norm_affline (bool): Whether to do elementwise affine in Layer Normalization.\n gcn_true (bool): Whether to add graph convolution layer.\n seq_length (int): Length of input sequence.\n receptive_field (int): Receptive field.\n dropout (float): Droupout rate.\n gcn_depth (int): Graph convolution depth.\n num_nodes (int): Number of nodes in the graph.\n propalpha (float): Prop alpha, ratio of retaining the root nodes's original states in mix-hop propagation, a value between 0 and 1.\n\n \"\"\"\n\n def __init__(\n self,\n dilation_exponential: int,\n rf_size_i: int,\n kernel_size: int,\n j: int,\n residual_channels: int,\n conv_channels: int,\n skip_channels: 
int,\n kernel_set: list,\n new_dilation: int,\n layer_norm_affline: bool,\n gcn_true: bool,\n seq_length: int,\n receptive_field: int,\n dropout: float,\n gcn_depth: int,\n num_nodes: int,\n propalpha: float,\n ):\n super(MTGNNLayer, self).__init__()\n self._dropout = dropout\n self._gcn_true = gcn_true\n\n if dilation_exponential > 1:\n rf_size_j = int(\n rf_size_i\n + (kernel_size - 1)\n * (dilation_exponential ** j - 1)\n / (dilation_exponential - 1)\n )\n else:\n rf_size_j = rf_size_i + j * (kernel_size - 1)\n\n self._filter_conv = DilatedInception(\n residual_channels,\n conv_channels,\n kernel_set=kernel_set,\n dilation_factor=new_dilation,\n )\n\n self._gate_conv = DilatedInception(\n residual_channels,\n conv_channels,\n kernel_set=kernel_set,\n dilation_factor=new_dilation,\n )\n\n self._residual_conv = nn.Conv2d(\n in_channels=conv_channels,\n out_channels=residual_channels,\n kernel_size=(1, 1),\n )\n\n if seq_length > receptive_field:\n self._skip_conv = nn.Conv2d(\n in_channels=conv_channels,\n out_channels=skip_channels,\n kernel_size=(1, seq_length - rf_size_j + 1),\n )\n else:\n self._skip_conv = nn.Conv2d(\n in_channels=conv_channels,\n out_channels=skip_channels,\n kernel_size=(1, receptive_field - rf_size_j + 1),\n )\n\n if gcn_true:\n self._mixprop_conv1 = MixProp(\n conv_channels, residual_channels, gcn_depth, dropout, propalpha\n )\n\n self._mixprop_conv2 = MixProp(\n conv_channels, residual_channels, gcn_depth, dropout, propalpha\n )\n\n if seq_length > receptive_field:\n self._normalization = LayerNormalization(\n (residual_channels, num_nodes, seq_length - rf_size_j + 1),\n elementwise_affine=layer_norm_affline,\n )\n\n else:\n self._normalization = LayerNormalization(\n (residual_channels, num_nodes, receptive_field - rf_size_j + 1),\n elementwise_affine=layer_norm_affline,\n )\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def forward(\n self,\n X: torch.FloatTensor,\n X_skip: torch.FloatTensor,\n A_tilde: Optional[torch.FloatTensor],\n idx: torch.LongTensor,\n training: bool,\n ) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of MTGNN layer.\n\n Arg types:\n * **X** (PyTorch FloatTensor) - Input feature tensor,\n with shape (batch_size, in_dim, num_nodes, seq_len).\n * **X_skip** (PyTorch FloatTensor) - Input feature tensor for skip connection,\n with shape (batch_size, in_dim, num_nodes, seq_len).\n * **A_tilde** (Pytorch FloatTensor or None) - Predefined adjacency matrix.\n * **idx** (Pytorch LongTensor) - Input indices.\n * **training** (bool) - Whether in traning mode.\n\n Return types:\n * **X** (PyTorch FloatTensor) - Output sequence tensor,\n with shape (batch_size, seq_len, num_nodes, seq_len).\n * **X_skip** (PyTorch FloatTensor) - Output feature tensor for skip connection,\n with shape (batch_size, in_dim, num_nodes, seq_len).\n \"\"\"\n X_residual = X\n X_filter = self._filter_conv(X)\n X_filter = torch.tanh(X_filter)\n X_gate = self._gate_conv(X)\n X_gate = torch.sigmoid(X_gate)\n X = X_filter * X_gate\n X = F.dropout(X, self._dropout, training=training)\n X_skip = self._skip_conv(X) + X_skip\n if self._gcn_true:\n X = self._mixprop_conv1(X, A_tilde) + self._mixprop_conv2(\n X, A_tilde.transpose(1, 0)\n )\n else:\n X = self._residual_conv(X)\n\n X = X + X_residual[:, :, :, -X.size(3) :]\n X = self._normalization(X, idx)\n return X, X_skip\n\n\nclass MTGNN(nn.Module):\n r\"\"\"An implementation of the Multivariate Time 
Series Forecasting Graph Neural Networks.\n For details see this paper: `\"Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks.\"\n <https://arxiv.org/pdf/2005.11650.pdf>`_\n\n Args:\n gcn_true (bool): Whether to add graph convolution layer.\n build_adj (bool): Whether to construct adaptive adjacency matrix.\n gcn_depth (int): Graph convolution depth.\n num_nodes (int): Number of nodes in the graph.\n kernel_set (list of int): List of kernel sizes.\n kernel_size (int): Size of kernel for convolution, to calculate receptive field size.\n dropout (float): Droupout rate.\n subgraph_size (int): Size of subgraph.\n node_dim (int): Dimension of nodes.\n dilation_exponential (int): Dilation exponential.\n conv_channels (int): Convolution channels.\n residual_channels (int): Residual channels.\n skip_channels (int): Skip channels.\n end_channels (int): End channels.\n seq_length (int): Length of input sequence.\n in_dim (int): Input dimension.\n out_dim (int): Output dimension.\n layers (int): Number of layers.\n propalpha (float): Prop alpha, ratio of retaining the root nodes's original states in mix-hop propagation, a value between 0 and 1.\n tanhalpha (float): Tanh alpha for generating adjacency matrix, alpha controls the saturation rate.\n layer_norm_affline (bool): Whether to do elementwise affine in Layer Normalization.\n xd (int, optional): Static feature dimension, default None.\n \"\"\"\n\n def __init__(\n self,\n gcn_true: bool,\n build_adj: bool,\n gcn_depth: int,\n num_nodes: int,\n kernel_set: list,\n kernel_size: int,\n dropout: float,\n subgraph_size: int,\n node_dim: int,\n dilation_exponential: int,\n conv_channels: int,\n residual_channels: int,\n skip_channels: int,\n end_channels: int,\n seq_length: int,\n in_dim: int,\n out_dim: int,\n layers: int,\n propalpha: float,\n tanhalpha: float,\n layer_norm_affline: bool,\n xd: Optional[int] = None,\n ):\n super(MTGNN, self).__init__()\n\n self._gcn_true = gcn_true\n self._build_adj_true = build_adj\n self._num_nodes = num_nodes\n self._dropout = dropout\n self._seq_length = seq_length\n self._layers = layers\n self._idx = torch.arange(self._num_nodes)\n\n self._mtgnn_layers = nn.ModuleList()\n\n self._graph_constructor = GraphConstructor(\n num_nodes, subgraph_size, node_dim, alpha=tanhalpha, xd=xd\n )\n\n self._set_receptive_field(dilation_exponential, kernel_size, layers)\n\n new_dilation = 1\n for j in range(1, layers + 1):\n self._mtgnn_layers.append(\n MTGNNLayer(\n dilation_exponential=dilation_exponential,\n rf_size_i=1,\n kernel_size=kernel_size,\n j=j,\n residual_channels=residual_channels,\n conv_channels=conv_channels,\n skip_channels=skip_channels,\n kernel_set=kernel_set,\n new_dilation=new_dilation,\n layer_norm_affline=layer_norm_affline,\n gcn_true=gcn_true,\n seq_length=seq_length,\n receptive_field=self._receptive_field,\n dropout=dropout,\n gcn_depth=gcn_depth,\n num_nodes=num_nodes,\n propalpha=propalpha,\n )\n )\n\n new_dilation *= dilation_exponential\n\n self._setup_conv(\n in_dim, skip_channels, end_channels, residual_channels, out_dim\n )\n\n self._reset_parameters()\n\n def _setup_conv(\n self, in_dim, skip_channels, end_channels, residual_channels, out_dim\n ):\n\n self._start_conv = nn.Conv2d(\n in_channels=in_dim, out_channels=residual_channels, kernel_size=(1, 1)\n )\n\n if self._seq_length > self._receptive_field:\n\n self._skip_conv_0 = nn.Conv2d(\n in_channels=in_dim,\n out_channels=skip_channels,\n kernel_size=(1, self._seq_length),\n bias=True,\n )\n\n 
self._skip_conv_E = nn.Conv2d(\n in_channels=residual_channels,\n out_channels=skip_channels,\n kernel_size=(1, self._seq_length - self._receptive_field + 1),\n bias=True,\n )\n\n else:\n self._skip_conv_0 = nn.Conv2d(\n in_channels=in_dim,\n out_channels=skip_channels,\n kernel_size=(1, self._receptive_field),\n bias=True,\n )\n\n self._skip_conv_E = nn.Conv2d(\n in_channels=residual_channels,\n out_channels=skip_channels,\n kernel_size=(1, 1),\n bias=True,\n )\n\n self._end_conv_1 = nn.Conv2d(\n in_channels=skip_channels,\n out_channels=end_channels,\n kernel_size=(1, 1),\n bias=True,\n )\n\n self._end_conv_2 = nn.Conv2d(\n in_channels=end_channels,\n out_channels=out_dim,\n kernel_size=(1, 1),\n bias=True,\n )\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n\n def _set_receptive_field(self, dilation_exponential, kernel_size, layers):\n if dilation_exponential > 1:\n self._receptive_field = int(\n 1\n + (kernel_size - 1)\n * (dilation_exponential ** layers - 1)\n / (dilation_exponential - 1)\n )\n else:\n self._receptive_field = layers * (kernel_size - 1) + 1\n\n def forward(\n self,\n X_in: torch.FloatTensor,\n A_tilde: Optional[torch.FloatTensor] = None,\n idx: Optional[torch.LongTensor] = None,\n FE: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass of MTGNN.\n\n Arg types:\n * **X_in** (PyTorch FloatTensor) - Input sequence, with shape (batch_size, in_dim, num_nodes, seq_len).\n * **A_tilde** (Pytorch FloatTensor, optional) - Predefined adjacency matrix, default None.\n * **idx** (Pytorch LongTensor, optional) - Input indices, a permutation of the num_nodes, default None (no permutation).\n * **FE** (Pytorch FloatTensor, optional) - Static feature, default None.\n\n Return types:\n * **X** (PyTorch FloatTensor) - Output sequence for prediction, with shape (batch_size, seq_len, num_nodes, 1).\n \"\"\"\n seq_len = X_in.size(3)\n assert (\n seq_len == self._seq_length\n ), \"Input sequence length not equal to preset sequence length.\"\n\n if self._seq_length < self._receptive_field:\n X_in = nn.functional.pad(\n X_in, (self._receptive_field - self._seq_length, 0, 0, 0)\n )\n\n if self._gcn_true:\n if self._build_adj_true:\n if idx is None:\n A_tilde = self._graph_constructor(self._idx.to(X_in.device), FE=FE)\n else:\n A_tilde = self._graph_constructor(idx, FE=FE)\n\n X = self._start_conv(X_in)\n X_skip = self._skip_conv_0(\n F.dropout(X_in, self._dropout, training=self.training)\n )\n if idx is None:\n for mtgnn in self._mtgnn_layers:\n X, X_skip = mtgnn(\n X, X_skip, A_tilde, self._idx.to(X_in.device), self.training\n )\n else:\n for mtgnn in self._mtgnn_layers:\n X, X_skip = mtgnn(X, X_skip, A_tilde, idx, self.training)\n\n X_skip = self._skip_conv_E(X) + X_skip\n X = F.relu(X_skip)\n X = F.relu(self._end_conv_1(X))\n X = self._end_conv_2(X)\n return X\n"
] | [
[
"torch.nn.Linear",
"torch.sigmoid",
"torch.cat",
"torch.nn.init.uniform_",
"torch.nn.ModuleList",
"torch.arange",
"torch.einsum",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.ones_",
"torch.nn.functional.relu",
"torch.nn.Conv2d",
"torch.tanh",
"torch.nn.init.zeros_",
"torch.nn.functional.pad",
"torch.Tensor",
"torch.nn.Embedding"
]
] |
dumpmemory/pytorch-lightning-template | [
"a698876ad51c9a7a4ca3bd60b930f03eaf3f4d90"
] | [
"special/kfold/data/standard_data.py"
] | [
"# Copyright 2021 Zhongyang Zhang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport os.path as op\nimport numpy as np\nimport pickle as pkl\nimport torch.utils.data as data\n\nfrom torchvision import transforms\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\n\nclass StandardData(data.Dataset):\n def __init__(self, data_dir=r'data/ref',\n class_num=9,\n train=True,\n no_augment=True,\n aug_prob=0.5,\n img_mean=(0.485, 0.456, 0.406),\n img_std=(0.229, 0.224, 0.225),\n kfold=0,\n fold_num=0):\n # Set all input args as attributes\n self.__dict__.update(locals())\n self.aug = train and not no_augment\n\n self.check_files()\n\n def check_files(self):\n # This part is the core code block for load your own dataset.\n # You can choose to scan a folder, or load a file list pickle\n # file, or any other formats. The only thing you need to gua-\n # rantee is the `self.path_list` must be given a valid value. \n file_list_path = op.join(self.data_dir, 'file_list.pkl')\n with open(file_list_path, 'rb') as f:\n file_list = pkl.load(f)\n\n if self.kfold != 0:\n kf = KFold(n_splits=self.kfold, shuffle=True, random_state=2333)\n fl_train_idx, fl_val_idx = list(kf.split(file_list))[self.fold_num]\n fl_train = np.array(file_list)[fl_train_idx]\n fl_val = np.array(file_list)[fl_val_idx]\n else:\n fl_train, fl_val = train_test_split(\n file_list, test_size=0.2, random_state=2333)\n\n self.path_list = fl_train if self.train else fl_val\n\n label_file = './data/ref/label_dict.pkl'\n with open(label_file, 'rb') as f:\n self.label_dict = pkl.load(f)\n\n def __len__(self):\n return len(self.path_list)\n\n def to_one_hot(self, idx):\n out = np.zeros(self.class_num, dtype=float)\n out[idx] = 1\n return out\n\n def __getitem__(self, idx):\n path = self.path_list[idx]\n filename = op.splitext(op.basename(path))[0]\n img = np.load(path).transpose(1, 2, 0)\n\n labels = self.to_one_hot(self.label_dict[filename.split('_')[0]])\n labels = torch.from_numpy(labels).float()\n\n trans = torch.nn.Sequential(\n transforms.RandomHorizontalFlip(self.aug_prob),\n transforms.RandomVerticalFlip(self.aug_prob),\n transforms.RandomRotation(10),\n transforms.RandomCrop(128),\n transforms.Normalize(self.img_mean, self.img_std)\n ) if self.train else torch.nn.Sequential(\n transforms.CenterCrop(128),\n transforms.Normalize(self.img_mean, self.img_std)\n )\n\n img_tensor = trans(img)\n\n return img_tensor, labels, filename"
] | [
[
"numpy.array",
"numpy.zeros",
"sklearn.model_selection.train_test_split",
"numpy.load",
"torch.from_numpy",
"sklearn.model_selection.KFold"
]
] |
yejianfeng2014/tacotron | [
"0dccf1f330c8f8f146347d4a4ca2d53a0780fb64"
] | [
"eval.py"
] | [
"# -*- coding: utf-8 -*-\n#/usr/bin/python2\n'''\nBy kyubyong park. [email protected]. \nhttps://www.github.com/kyubyong/tacotron\n'''\n\nfrom __future__ import print_function\n\nfrom hyperparams import Hyperparams as hp\nimport numpy as np\nfrom data_load import load_data\nimport tensorflow as tf\nfrom train import Graph\nfrom utils import load_spectrograms\n\n\ndef eval(): \n # Load graph 载入模型\n g = Graph(mode=\"eval\")\n\n print(\"Evaluation Graph loaded\")\n\n # Load data\n fpaths, text_lengths, texts = load_data(mode=\"eval\")\n\n # Parse\n text = np.fromstring(texts[0], np.int32) # (None,)\n\n fpaths_ = fpaths[0]\n\n tempPath = \"data/LJSpeech-1.1/wav/LJ001-0001.wav\"\n\n fname, mel, mag = load_spectrograms(tempPath)\n\n x = np.expand_dims(text, 0) # (1, None)\n y = np.expand_dims(mel, 0) # (1, None, n_mels*r)\n z = np.expand_dims(mag, 0) # (1, None, n_mfccs)\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint(hp.logdir));\n print(\"Restored!\")\n\n writer = tf.summary.FileWriter(hp.logdir, sess.graph)\n\n # Feed Forward\n ## mel\n y_hat = np.zeros((1, y.shape[1], y.shape[2]), np.float32) # hp.n_mels*hp.r\n for j in range(y.shape[1]):\n _y_hat = sess.run(g.y_hat, {g.x: x, g.y: y_hat})\n y_hat[:, j, :] = _y_hat[:, j, :]\n\n ## mag\n merged, gs = sess.run([g.merged, g.global_step], {g.x:x, g.y:y, g.y_hat: y_hat, g.z: z})\n writer.add_summary(merged, global_step=gs)\n writer.close()\n\nif __name__ == '__main__':\n eval()\n print(\"Done\")\n \n \n"
] | [
[
"tensorflow.train.latest_checkpoint",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.summary.FileWriter",
"numpy.fromstring",
"numpy.expand_dims"
]
] |
ggrrll/ndlib | [
"375d00d69180dc2a38f3690b4bc2cdce40fd86de"
] | [
"ndlib/models/epidemics/SEISModel.py"
] | [
"from ..DiffusionModel import DiffusionModel\nimport numpy as np\nimport future.utils\n\n__author__ = \"Elisa Salatti\"\n__license__ = \"BSD-2-Clause\"\n\n\nclass SEISModel(DiffusionModel):\n \"\"\"\n Model Parameters to be specified via ModelConfig\n\n :param beta: The infection rate (float value in [0,1])\n :param lambda: The recovery rate (float value in [0,1])\n \"\"\"\n\n def __init__(self, graph, seed=None):\n \"\"\"\n Model Constructor\n\n :param graph: A networkx graph object\n \"\"\"\n super(self.__class__, self).__init__(graph, seed)\n self.available_statuses = {\n \"Susceptible\": 0,\n \"Exposed\": 2,\n \"Infected\": 1\n }\n\n self.parameters = {\n \"model\": {\n \"alpha\": {\n \"descr\": \"Incubation period\",\n \"range\": [0, 1],\n \"optional\": False},\n \"beta\": {\n \"descr\": \"Infection rate\",\n \"range\": [0, 1],\n \"optional\": False},\n \"lambda\": {\n \"descr\": \"Recovery rate\",\n \"range\": [0, 1],\n \"optional\": False\n },\n \"tp_rate\": {\n \"descr\": \"Whether if the infection rate depends on the number of infected neighbors\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 1\n }\n },\n \"nodes\": {},\n \"edges\": {},\n }\n\n self.name = \"SEIS\"\n self.progress = {}\n\n def iteration(self, node_status=True):\n \"\"\"\n Execute a single model iteration\n\n :return: Iteration_id, Incremental node status (dictionary node->status)\n \"\"\"\n self.clean_initial_status(self.available_statuses.values())\n\n actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}\n\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(actual_status)\n if node_status:\n return {\"iteration\": 0, \"status\": actual_status.copy(),\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n else:\n return {\"iteration\": 0, \"status\": {},\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n\n for u in self.graph.nodes:\n\n u_status = self.status[u]\n eventp = np.random.random_sample()\n neighbors = self.graph.neighbors(u)\n if self.graph.directed:\n neighbors = self.graph.predecessors(u)\n\n if u_status == 0: # Susceptible\n infected_neighbors = [v for v in neighbors if self.status[v] == 1]\n triggered = 1 if len(infected_neighbors) > 0 else 0\n\n if self.params['model']['tp_rate'] == 1:\n if eventp < 1 - (1 - self.params['model']['beta']) ** len(infected_neighbors):\n actual_status[u] = 2 # Exposed\n self.progress[u] = 0\n else:\n if eventp < self.params['model']['beta'] * triggered:\n actual_status[u] = 2 # Exposed\n self.progress[u] = 0\n\n elif u_status == 2:\n if self.progress[u] < 1:\n self.progress[u] += self.params['model']['alpha']\n else:\n actual_status[u] = 1 # Infected\n del self.progress[u]\n\n elif u_status == 1:\n if eventp < self.params['model']['lambda']:\n actual_status[u] = 0 # Susceptible\n\n delta, node_count, status_delta = self.status_delta(actual_status)\n self.status = actual_status\n self.actual_iteration += 1\n\n if node_status:\n return {\"iteration\": self.actual_iteration - 1, \"status\": delta.copy(),\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n else:\n return {\"iteration\": self.actual_iteration - 1, \"status\": {},\n \"node_count\": node_count.copy(), \"status_delta\": status_delta.copy()}\n"
] | [
[
"numpy.random.random_sample"
]
] |
srenoes/pyogrio | [
"9398a7f1dae001cc04c7c52c5e4c67882fea20f5"
] | [
"pyogrio/tests/test_raw_io.py"
] | [
"import json\nimport os\n\nimport numpy as np\nfrom numpy import array_equal\nimport pytest\n\nfrom pyogrio import list_layers\nfrom pyogrio.raw import read, write\n\n\ndef test_read(naturalearth_lowres):\n meta, geometry, fields = read(naturalearth_lowres)\n\n assert meta[\"crs\"] == \"EPSG:4326\"\n assert meta[\"geometry_type\"] == \"Polygon\"\n assert meta[\"encoding\"] == \"UTF-8\"\n assert meta[\"fields\"].shape == (5,)\n\n assert meta[\"fields\"].tolist() == [\n \"pop_est\",\n \"continent\",\n \"name\",\n \"iso_a3\",\n \"gdp_md_est\",\n ]\n\n assert len(fields) == 5\n assert len(geometry) == len(fields[0])\n\n # quick test that WKB is a Polygon type\n assert geometry[0][:6] == b\"\\x01\\x06\\x00\\x00\\x00\\x03\"\n\n\ndef test_vsi_read_layers(naturalearth_lowres_vsi):\n assert array_equal(\n list_layers(naturalearth_lowres_vsi), [[\"naturalearth_lowres\", \"Polygon\"]]\n )\n\n meta, geometry, fields = read(naturalearth_lowres_vsi)\n assert geometry.shape == (177,)\n\n\ndef test_read_no_geometry(naturalearth_lowres):\n meta, geometry, fields = read(naturalearth_lowres, read_geometry=False)\n\n assert geometry is None\n\n\ndef test_read_columns(naturalearth_lowres):\n # read no columns or geometry\n meta, geometry, fields = read(naturalearth_lowres, columns=[], read_geometry=False)\n assert geometry is None\n assert len(fields) == 0\n array_equal(meta[\"fields\"], np.empty(shape=(0, 4), dtype=\"object\"))\n\n columns = [\"NAME\", \"NAME_LONG\"]\n meta, geometry, fields = read(\n naturalearth_lowres, columns=columns, read_geometry=False\n )\n array_equal(meta[\"fields\"], columns)\n\n # Repeats should be dropped\n columns = [\"NAME\", \"NAME_LONG\", \"NAME\"]\n meta, geometry, fields = read(\n naturalearth_lowres, columns=columns, read_geometry=False\n )\n array_equal(meta[\"fields\"], columns[:2])\n\n\ndef test_read_skip_features(naturalearth_lowres):\n expected_geometry, expected_fields = read(naturalearth_lowres)[1:]\n geometry, fields = read(naturalearth_lowres, skip_features=10)[1:]\n\n assert len(geometry) == len(expected_geometry) - 10\n assert len(fields[0]) == len(expected_fields[0]) - 10\n\n assert np.array_equal(geometry, expected_geometry[10:])\n # Last field has more variable data\n assert np.array_equal(fields[-1], expected_fields[-1][10:])\n\n\ndef test_read_max_features(naturalearth_lowres):\n expected_geometry, expected_fields = read(naturalearth_lowres)[1:]\n geometry, fields = read(naturalearth_lowres, max_features=2)[1:]\n\n assert len(geometry) == 2\n assert len(fields[0]) == 2\n\n assert np.array_equal(geometry, expected_geometry[:2])\n assert np.array_equal(fields[-1], expected_fields[-1][:2])\n\n\ndef test_read_where(naturalearth_lowres):\n # empty filter should return full set of records\n geometry, fields = read(naturalearth_lowres, where=\"\")[1:]\n assert len(geometry) == 177\n assert len(fields) == 5\n assert len(fields[0]) == 177\n\n # should return singular item\n geometry, fields = read(naturalearth_lowres, where=\"iso_a3 = 'CAN'\")[1:]\n assert len(geometry) == 1\n assert len(fields) == 5\n assert len(fields[0]) == 1\n assert fields[3] == \"CAN\"\n\n # should return items within range\n geometry, fields = read(\n naturalearth_lowres, where=\"POP_EST >= 10000000 AND POP_EST < 100000000\"\n )[1:]\n assert len(geometry) == 75\n assert min(fields[0]) >= 10000000\n assert max(fields[0]) < 100000000\n\n # should match no items\n with pytest.warns(UserWarning, match=\"does not have any features to read\") as w:\n geometry, fields = read(naturalearth_lowres, 
where=\"iso_a3 = 'INVALID'\")[1:]\n assert len(geometry) == 0\n\n\ndef test_read_where_invalid(naturalearth_lowres):\n with pytest.raises(ValueError, match=\"Invalid SQL\"):\n read(naturalearth_lowres, where=\"invalid\")\n\n\ndef test_write(tmpdir, naturalearth_lowres):\n meta, geometry, field_data = read(naturalearth_lowres)\n\n filename = os.path.join(str(tmpdir), \"test.shp\")\n write(filename, geometry, field_data, **meta)\n\n assert os.path.exists(filename)\n for ext in (\".dbf\", \".prj\"):\n assert os.path.exists(filename.replace(\".shp\", ext))\n\n\ndef test_write_gpkg(tmpdir, naturalearth_lowres):\n meta, geometry, field_data = read(naturalearth_lowres)\n\n filename = os.path.join(str(tmpdir), \"test.gpkg\")\n write(filename, geometry, field_data, driver=\"GPKG\", **meta)\n\n assert os.path.exists(filename)\n\n\ndef test_write_geojson(tmpdir, naturalearth_lowres):\n meta, geometry, field_data = read(naturalearth_lowres)\n\n filename = os.path.join(str(tmpdir), \"test.json\")\n write(filename, geometry, field_data, driver=\"GeoJSON\", **meta)\n\n assert os.path.exists(filename)\n\n data = json.loads(open(filename).read())\n\n assert data[\"type\"] == \"FeatureCollection\"\n assert data[\"name\"] == \"test\"\n assert \"crs\" in data\n assert len(data[\"features\"]) == len(geometry)\n assert not len(\n set(meta[\"fields\"]).difference(data[\"features\"][0][\"properties\"].keys())\n )\n\n\ndef test_write_geojsonseq(tmpdir, naturalearth_lowres):\n meta, geometry, field_data = read(naturalearth_lowres)\n\n filename = os.path.join(str(tmpdir), \"test.json\")\n write(filename, geometry, field_data, driver=\"GeoJSONSeq\", **meta)\n\n assert os.path.exists(filename)\n\n"
] | [
[
"numpy.array_equal",
"numpy.empty"
]
] |
mattip/builder | [
"2d91252afc7407170182cffffe1f578b368a2ac2"
] | [
"analytics/circleci_analyze.py"
] | [
"#!/usr/bin/env python3.7\nfrom datetime import datetime, time\nimport json\nimport requests\nimport itertools\nimport sqlite3\nimport os\nimport sys\nfrom typing import Callable, Dict, Generator, List, MutableSet, Optional\n\n\ndef get_executor_price_rate(executor):\n (etype, eclass) = executor['type'], executor['resource_class']\n assert etype in ['machine', 'external', 'docker', 'macos', 'runner'], f'Unexpected type {etype}:{eclass}'\n if etype == 'machine':\n return {\n 'medium': 10,\n 'large': 20,\n 'xlarge': 100,\n '2xlarge': 200,\n 'gpu.medium': 160,\n 'gpu.large': 320,\n 'gpu.small': 80,\n 'windows.medium': 40,\n 'windows.large': 120,\n 'windows.xlarge': 210,\n 'windows.2xlarge': 500,\n 'windows.gpu.nvidia.medium': 500,\n 'gpu.nvidia.small': 160,\n 'gpu.nvidia.medium': 240,\n 'gpu.nvidia.large': 1000,\n }[eclass]\n if etype == 'macos':\n return {\n 'medium': 50,\n 'large': 100,\n }[eclass]\n if etype == 'docker':\n return {\n 'small': 5,\n 'medium': 10,\n 'medium+': 15,\n 'large': 20,\n 'xlarge': 40,\n '2xlarge': 80,\n '2xlarge+': 100,\n }[eclass]\n if etype == 'runner' or etype == 'external':\n return {\n 'pytorch/amd-gpu': 0,\n }[eclass]\n raise RuntimeError(f'Undefined executor {etype}:{eclass}')\n\n\nprice_per_credit = 6e-4\n\n\ndef get_circleci_token() -> str:\n token_file_path = os.path.join(os.getenv('HOME'), '.circleci_token')\n token = os.getenv('CIRCLECI_TOKEN')\n if token is not None:\n return token\n if not os.path.exists(token_file_path):\n raise RuntimeError('Can not get CirclCI token'\n ' neither from CIRCLECI_TOKEN environment variable,'\n ' nor via ~/.circleci_token file')\n with open(token_file_path) as f:\n return f.read().strip()\n\n\ndef is_workflow_in_progress(workflow: Dict) -> bool:\n return workflow['status'] in ['running', 'not_run', 'failing', 'on_hold']\n\n\ndef str2date(val: str) -> datetime:\n assert val is not None\n return datetime.fromisoformat(val[:-1] if val.endswith('Z') else val)\n\n\nclass CircleCICache:\n def __init__(self, token: Optional[str], db_name: str = 'circleci-cache.db') -> None:\n file_folder = os.path.dirname(__file__)\n self.url_prefix = 'https://circleci.com/api/v2'\n self.session = requests.session()\n self.headers = {\n 'Accept': 'application/json',\n 'Circle-Token': token,\n } if token is not None else None\n self.db = sqlite3.connect(os.path.join(file_folder, db_name))\n self.db.execute('CREATE TABLE IF NOT EXISTS jobs(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')\n self.db.execute('CREATE TABLE IF NOT EXISTS artifacts(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')\n self.db.execute('CREATE UNIQUE INDEX IF NOT EXISTS jobs_key on jobs(slug, job_id);')\n self.db.execute('CREATE TABLE IF NOT EXISTS workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')\n self.db.execute('CREATE TABLE IF NOT EXISTS pipeline_workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')\n self.db.execute('CREATE TABLE IF NOT EXISTS pipelines(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL, branch TEXT, revision TEXT);')\n self.db.commit()\n\n def is_offline(self) -> bool:\n return self.headers is None\n\n def _get_paged_items_list(self, url: str, params: Optional[Dict] = None, item_count: Optional[int] = -1) -> List:\n rc, token, run_once = [], None, False\n\n def _should_quit():\n nonlocal run_once, rc, token\n if not run_once:\n run_once = True\n return False\n if token is None:\n return True\n if item_count is None:\n return True\n return item_count >= 0 and len(rc) >= item_count\n\n if params 
is None:\n params = {}\n while not _should_quit():\n if token is not None:\n params['page-token'] = token\n r = self.session.get(url, params=params, headers=self.headers)\n try:\n j = r.json()\n except json.JSONDecodeError:\n print(f\"Failed to decode {rc}\", file=sys.stderr)\n raise\n if 'message' in j:\n raise RuntimeError(f'Failed to get list from {url}: {j[\"message\"]}')\n token = j['next_page_token']\n rc.extend(j['items'])\n return rc\n\n def get_pipelines(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> List:\n if self.is_offline():\n c = self.db.cursor()\n cmd = \"SELECT json from pipelines\"\n if branch is not None:\n cmd += f\" WHERE branch='{branch}'\"\n if item_count is not None and item_count > 0:\n cmd += f\" LIMIT {item_count}\"\n c.execute(cmd)\n return [json.loads(val[0]) for val in c.fetchall()]\n rc = self._get_paged_items_list(f'{self.url_prefix}/project/{project}/pipeline', {'branch': branch} if branch is not None else {}, item_count)\n for pipeline in rc:\n vcs = pipeline['vcs']\n pid, branch, revision, pser = pipeline['id'], vcs['branch'], vcs['revision'], json.dumps(pipeline)\n self.db.execute(\"INSERT OR REPLACE INTO pipelines(id, branch, revision, json) VALUES (?, ?, ?, ?)\", (pid, branch, revision, pser))\n self.db.commit()\n return rc\n\n def get_pipeline_workflows(self, pipeline) -> List:\n c = self.db.cursor()\n c.execute(\"SELECT json FROM pipeline_workflows WHERE id=?\", (pipeline,))\n rc = c.fetchone()\n if rc is not None:\n rc = json.loads(rc[0])\n if not any(is_workflow_in_progress(w) for w in rc) or self.is_offline():\n return rc\n if self.is_offline():\n return []\n rc = self._get_paged_items_list(f'{self.url_prefix}/pipeline/{pipeline}/workflow')\n self.db.execute(\"INSERT OR REPLACE INTO pipeline_workflows(id, json) VALUES (?, ?)\", (pipeline, json.dumps(rc)))\n self.db.commit()\n return rc\n\n def get_workflow_jobs(self, workflow, should_cache=True) -> List:\n c = self.db.cursor()\n c.execute(\"select json from workflows where id=?\", (workflow,))\n rc = c.fetchone()\n if rc is not None:\n return json.loads(rc[0])\n if self.is_offline():\n return []\n rc = self._get_paged_items_list(f'{self.url_prefix}/workflow/{workflow}/job')\n if should_cache:\n self.db.execute(\"INSERT INTO workflows(id, json) VALUES (?, ?)\", (workflow, json.dumps(rc)))\n self.db.commit()\n return rc\n\n def get_job(self, project_slug, job_number) -> Dict:\n c = self.db.cursor()\n c.execute(\"select json from jobs where slug=? and job_id = ?\", (project_slug, job_number))\n rc = c.fetchone()\n if rc is not None:\n return json.loads(rc[0])\n if self.is_offline():\n return {}\n r = self.session.get(f'{self.url_prefix}/project/{project_slug}/job/{job_number}', headers=self.headers)\n try:\n rc = r.json()\n except json.JSONDecodeError:\n print(f\"Failed to decode {rc}\", file=sys.stderr)\n raise\n self.db.execute(\"INSERT INTO jobs(slug,job_id, json) VALUES (?, ?, ?)\", (project_slug, job_number, json.dumps(rc)))\n self.db.commit()\n return rc\n\n def get_job_artifacts(self, project_slug, job_number) -> List[Dict]:\n c = self.db.cursor()\n c.execute(\"select json from artifacts where slug=? 
and job_id = ?\", (project_slug, job_number))\n rc = c.fetchone()\n if rc is not None:\n return json.loads(rc[0])\n if self.is_offline():\n return [{}]\n rc = self._get_paged_items_list(f\"{self.url_prefix}/project/{project_slug}/{job_number}/artifacts\")\n self.db.execute(\"INSERT INTO artifacts(slug,job_id, json) VALUES (?, ?, ?)\", (project_slug, job_number, json.dumps(rc)))\n self.db.commit()\n return rc\n\n def get_pipeline_jobs(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> Generator:\n for pipeline in self.get_pipelines(project, branch, item_count):\n for workflow in self.get_pipeline_workflows(pipeline['id']):\n in_progress = is_workflow_in_progress(workflow)\n for job in self.get_workflow_jobs(workflow['id'], should_cache=not in_progress):\n yield (pipeline, workflow, job)\n\n def get_jobs_summary(self, slug='gh/pytorch/pytorch', workflow='build') -> Dict:\n items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/{workflow}/jobs')\n return {item['name']: item for item in items}\n\n def get_job_timeseries(self, job_name: str,\n slug: str = 'gh/pytorch/pytorch',\n workflow: str = 'build',\n branch: Optional[str] = None) -> List:\n params = {'branch': branch} if branch is not None else {}\n items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/build/jobs/{job_name}', params)\n return [(str2date(x['started_at']), x['duration']) for x in items if x['status'] == 'success']\n\n\ndef aggregate_by_day(series):\n rc = {}\n for (ts, val) in series:\n date = datetime.combine(ts.date(), time())\n valcount = [val, 1.0]\n if date not in rc:\n rc[date] = valcount\n else:\n rc[date] = [sum(x) for x in zip(rc[date], valcount)]\n return [(x, rc[x][0] / rc[x][1]) for x in sorted(rc.keys())]\n\n\ndef filter_names(names: List[str], name_filter: Optional[str] = None) -> List[str]:\n import re\n if name_filter is None:\n return names\n filters = name_filter.split(\",\")\n return [name for name in names if any(re.match(filter, name) for filter in filters)]\n\n\ndef common_prefix(names: List[str]) -> str:\n if len(names) == 0 or len(names[0]) == 0:\n return ''\n if len(names) == 1:\n return names[0]\n rc = names[0][0]\n while rc != names[0] and all(name.startswith(rc) for name in names[1:]):\n rc = names[0][:len(rc) + 1]\n return rc[:-1]\n\n\ndef plot_graph(name_filter: Optional[str] = None,\n output_file: Optional[str] = None,\n branch: Optional[str] = None) -> None:\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n\n ci_cache = CircleCICache(token=get_circleci_token())\n summary = ci_cache.get_jobs_summary()\n test_jobs = [name for name in summary.keys() if name.startswith('pytorch') and 'test' in name]\n filtered_jobs = filter_names(test_jobs, name_filter)\n prefix = common_prefix(filtered_jobs)\n if len(filtered_jobs) == 0:\n print(f'Filter \"{name_filter}\" does not match to any of {test_jobs}')\n return\n series = []\n labels = []\n styles = [f'{color}{style}' for (style, color) in itertools.product(['-', '--', '-.', ':'], ['b', 'g', 'r', 'c', 'm', 'y', 'k'])]\n fig, ax = plt.subplots()\n for name in test_jobs:\n label = f\"{name}(p95 = {int(summary[name]['metrics']['duration_metrics']['p95']/60)} min)\"\n if name not in filtered_jobs:\n print(label)\n continue\n ts = ci_cache.get_job_timeseries(name, branch=branch)\n if len(ts) == 0:\n print(f'{label} time series is empty!')\n continue\n print(f'{label} time series has {len(ts)} elements')\n 
labels.append(label[len(prefix):])\n series.append(ts)\n x, y = zip(*aggregate_by_day(ts))\n plt.plot(x, [i / 60.0 for i in y], styles[len(labels) % len(styles)])\n plt.legend(labels, loc='upper left')\n plt.title(f'{prefix} timeseries')\n ax.set_ylabel(\"Duration (m)\")\n # Format date\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))\n # Rotate tick labels\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')\n if output_file is not None:\n plt.savefig(output_file)\n else:\n plt.show()\n\n\ndef print_line(line: str, padding: Optional[int] = None, newline: bool = True) -> None:\n if padding is not None and len(line) < padding:\n line += ' ' * (padding - len(line))\n print(line, end='\\n' if newline else '\\r', flush=True)\n\n\ndef fetch_status(branch=None, item_count=50):\n isatty = sys.stdout.isatty()\n padding = os.get_terminal_size().columns - 1 if isatty else None\n ci_cache = CircleCICache(token=get_circleci_token())\n print(f\"About to fetch {item_count} latest pipelines against {branch if branch is not None else 'all branches'}\")\n pipelines = ci_cache.get_pipelines(branch=branch, item_count=item_count)\n total_price, total_master_price = 0, 0\n for pipeline_idx, pipeline in enumerate(pipelines):\n revision = pipeline['vcs']['revision']\n branch = pipeline['vcs']['branch']\n workflows = ci_cache.get_pipeline_workflows(pipeline['id'])\n known_job_ids = []\n for workflow in workflows:\n url = f'https://app.circleci.com/pipelines/github/pytorch/pytorch/{workflow[\"pipeline_number\"]}/workflows/{workflow[\"id\"]}'\n if is_workflow_in_progress(workflow):\n print_line(f'Skipping {url} name:{workflow[\"name\"]} status:{workflow[\"status\"]}',\n newline=not sys.stdout.isatty())\n continue\n rerun = False\n total_credits, test_credits, gpu_credits, wincpu_credits, wingpu_credits = 0, 0, 0, 0, 0\n jobs = ci_cache.get_workflow_jobs(workflow['id'])\n for job in jobs:\n job_name, job_status, job_number = job['name'], job['status'], job.get('job_number', None)\n if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:\n continue\n if job_number is None:\n print(job)\n continue\n if job_number in known_job_ids:\n rerun = True\n continue\n job_info = ci_cache.get_job(job['project_slug'], job_number)\n if 'executor' not in job_info:\n print(f'executor not found in {job_info}')\n continue\n job_executor = job_info['executor']\n resource_class = job_executor['resource_class']\n if resource_class is None:\n print(f'resource_class is none for {job_info}')\n continue\n job_on_gpu = 'gpu' in resource_class\n job_on_win = 'windows' in resource_class\n if job_status != 'infrastructure_fail':\n duration = str2date(job_info['stopped_at']) - str2date(job_info['started_at'])\n job_credits = get_executor_price_rate(job_executor) * int(job_info['duration']) * 1e-3 / 60\n else:\n job_credits, duration = 0, 0\n job_cost = job_credits * price_per_credit\n total_credits += job_credits\n if 'test' in job_name or job_name.startswith('smoke_'):\n test_credits += job_credits\n elif job_on_gpu:\n print(f'Running build job {job_name} on GPU!!!')\n if job_on_gpu:\n gpu_credits += job_credits\n if job_on_win:\n wingpu_credits += job_credits\n if job_on_win and not job_on_gpu:\n wincpu_credits += job_credits\n known_job_ids.append(job_number)\n print_line(f' {job_name} {job_status} {duration} ${job_cost:.2f}',\n padding=padding, newline=not isatty)\n # Increment totals\n total_price += total_credits * price_per_credit\n if branch in ['master', 
'nightly', 'postnightly', 'release/1.6']:\n total_master_price += total_credits * price_per_credit\n # skip small jobs\n if total_credits * price_per_credit < .1:\n continue\n workflow_status = f'[{pipeline_idx}/{len(pipelines)}]'\n workflow_status += f' {url} {workflow[\"name\"]} status:{workflow[\"status\"]}'\n workflow_status += f' price: ${total_credits * price_per_credit:.2f}'\n workflow_status += ' (Rerun?)' if rerun else ''\n workflow_status += f'\\n\\t\\tdate: {workflow[\"created_at\"]} branch:{branch} revision:{revision}'\n workflow_status += f'\\n\\t\\ttotal credits: {int(total_credits)}'\n if test_credits != 0:\n workflow_status += f' testing: {100 * test_credits / total_credits:.1f}%'\n if gpu_credits != 0:\n workflow_status += f' GPU testing: {100 * gpu_credits / total_credits:.1f}%'\n if wingpu_credits != 0:\n workflow_status += f' WINGPU/GPU: {100 * wingpu_credits / gpu_credits:.1f}%'\n\n if wincpu_credits != 0:\n workflow_status += f' Win CPU: {100 * wincpu_credits / total_credits:.1f}%'\n workflow_status += f' Total: ${total_price:.2f} master fraction: {100 * total_master_price/ total_price:.1f}%'\n print_line(workflow_status, padding=padding)\n\n\ndef plot_heatmap(cov_matrix, names):\n import numpy as np\n import matplotlib.pyplot as plt\n assert cov_matrix.shape == (len(names), len(names))\n fig, ax = plt.subplots()\n ax.imshow(cov_matrix)\n ax.set_xticks(np.arange(len(names)))\n ax.set_yticks(np.arange(len(names)))\n ax.set_xticklabels(names)\n ax.set_yticklabels(names)\n # Rotate tick labels\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')\n # Annotate values\n for i in range(len(names)):\n for j in range(len(names)):\n ax.text(j, i, f'{cov_matrix[i, j]:.2f}', ha='center', va='center', color='w')\n plt.show()\n\n\ndef filter_service_jobs(name):\n if name.startswith('docker'):\n return True\n if name.startswith('binary'):\n return True\n return False\n\n\ndef filter_cuda_test(name):\n if filter_service_jobs(name):\n return False\n if 'libtorch' in name:\n return False\n if 'test' not in name:\n return False\n # Skip jit-profiling tests\n if 'jit-profiling' in name:\n return False\n if 'cuda11' in name:\n return False\n # Skip VS2017 tests\n if 'vs2017' in name:\n return False\n return 'cuda' in name and 'nogpu' not in name\n\n\ndef filter_cuda_build(name):\n if filter_service_jobs(name):\n return False\n if 'libtorch' in name:\n return False\n return 'cuda' in name and name.endswith('build')\n\n\ndef filter_windows_test(name):\n if filter_service_jobs(name):\n return False\n # Skip jit-profiling tests\n if 'jit-profiling' in name:\n return False\n return 'test' in name and 'windows' in name\n\n\ndef compute_covariance(branch='master', name_filter: Optional[Callable[[str], bool]] = None):\n import numpy as np\n revisions: MutableSet[str] = set()\n job_summary: Dict[str, Dict[str, float]] = {}\n\n # Extract data\n print(f\"Computing covariance for {branch if branch is not None else 'all branches'}\")\n ci_cache = CircleCICache(None)\n pipelines = ci_cache.get_pipelines(branch=branch)\n for pipeline in pipelines:\n if pipeline['trigger']['type'] == 'schedule':\n continue\n revision = pipeline['vcs']['revision']\n pipeline_jobs: Dict[str, float] = {}\n blocked_jobs: MutableSet[str] = set()\n workflows = ci_cache.get_pipeline_workflows(pipeline['id'])\n for workflow in workflows:\n if is_workflow_in_progress(workflow):\n continue\n jobs = ci_cache.get_workflow_jobs(workflow['id'])\n for job in jobs:\n job_name = job['name']\n job_status = 
job['status']\n # Handle renames\n if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX2_test':\n job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX2_test'\n if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX_NO_AVX2_test':\n job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX_test'\n if job_status in ['infrastructure_fail', 'canceled']:\n continue\n if callable(name_filter) and not name_filter(job_name):\n continue\n if job_status == 'blocked':\n blocked_jobs.add(job_name)\n continue\n if job_name in blocked_jobs:\n blocked_jobs.remove(job_name)\n result = 1.0 if job_status == 'success' else -1.0\n pipeline_jobs[job_name] = result\n # Skip build with blocked job [which usually means build failed due to the test failure]\n if len(blocked_jobs) != 0:\n continue\n # Skip all success workflows\n if all(result == 1.0 for result in pipeline_jobs.values()):\n continue\n revisions.add(revision)\n for job_name in pipeline_jobs:\n if job_name not in job_summary:\n job_summary[job_name] = {}\n job_summary[job_name][revision] = pipeline_jobs[job_name]\n # Analyze results\n job_names = sorted(job_summary.keys())\n # revisions = sorted(revisions)\n job_data = np.zeros((len(job_names), len(revisions)), dtype=np.float)\n print(f\"Number of observations: {len(revisions)}\")\n for job_idx, job_name in enumerate(job_names):\n job_row = job_summary[job_name]\n for rev_idx, revision in enumerate(revisions):\n if revision in job_row:\n job_data[job_idx, rev_idx] = job_row[revision]\n success_rate = job_data[job_idx, ].sum(where=job_data[job_idx, ] > 0.0) / len(job_row)\n present_rate = 1.0 * len(job_row) / len(revisions)\n print(f\"{job_name}: missing {100.0 * (1.0 - present_rate):.2f}% success rate: {100 * success_rate:.2f}%\")\n cov_matrix = np.corrcoef(job_data)\n plot_heatmap(cov_matrix, job_names)\n\n\ndef print_artifacts(branch, item_count, name_filter: Callable[[str], bool]) -> None:\n ci_cache = CircleCICache(token=get_circleci_token())\n for pipeline, _, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):\n revision = pipeline['vcs']['revision']\n if not name_filter(job[\"name\"]):\n continue\n job_number = job.get(\"job_number\")\n if job_number is None:\n continue\n artifacts = ci_cache.get_job_artifacts('gh/pytorch/pytorch', job_number)\n for artifact in artifacts:\n name = os.path.basename(artifact['path'])\n url = artifact[\"url\"]\n print(f\"{revision} {name} {url}\")\n\n\ndef print_duration(branch, item_count, name_filter: Callable[[str], bool]) -> None:\n ci_cache = CircleCICache(token=get_circleci_token())\n for pipeline, workflow, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):\n job_name, job_status, job_number = job['name'], job['status'], job.get(\"job_number\")\n revision = pipeline['vcs']['revision']\n if not name_filter(job_name) or job_number is None:\n continue\n if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:\n continue\n started_at = str2date(job['started_at'])\n stopped_at = str2date(job['stopped_at'])\n duration = stopped_at - started_at\n print(f\"{job_name} {revision} {duration} {started_at}\")\n\n\ndef parse_arguments():\n from argparse import ArgumentParser\n parser = ArgumentParser(description=\"Download and analyze circle logs\")\n parser.add_argument('--plot-graph', type=str, nargs='?', help=\"Plot job time trends\", const='')\n parser.add_argument('--output', type=str, help=\"Output file name for the graphs\")\n 
parser.add_argument('--get_artifacts', type=str)\n parser.add_argument('--print-duration', type=str)\n parser.add_argument('--branch', type=str)\n parser.add_argument('--item_count', type=int, default=100)\n parser.add_argument('--compute_covariance', choices=['cuda_test', 'cuda_build', 'windows_test'])\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n if args.get_artifacts is not None:\n print_artifacts(branch=args.branch,\n item_count=args.item_count,\n name_filter=lambda x: args.get_artifacts in x)\n sys.exit(0)\n if args.print_duration is not None:\n print_duration(branch=args.branch,\n item_count=args.item_count,\n name_filter=lambda x: args.print_duration in x)\n sys.exit(0)\n if args.compute_covariance is not None:\n name_filter = {\n 'cuda_test': filter_cuda_test,\n 'cuda_build': filter_cuda_build,\n 'windows_test': filter_windows_test,\n }[args.compute_covariance]\n compute_covariance(branch=args.branch, name_filter=name_filter)\n sys.exit(0)\n if args.plot_graph is not None:\n plot_graph(args.plot_graph, args.output, args.branch)\n sys.exit(0)\n fetch_status(branch=args.branch, item_count=args.item_count)\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.corrcoef"
]
] |
lixiaohaao/CMARL | [
"5581638b62443302d38a89a7ebb3d31b63bf236e"
] | [
"dqn_agent.py"
] | [
"\"\"\"\nCreated on Wednesday Jan 16 2019\n\n@author: Seyed Mohammad Asghari\n@github: https://github.com/s3yyy3d-m\n\"\"\"\n\nimport numpy as np\nimport random\nimport sympy as sp\nimport tensorflow as tf\n\n\nfrom brain import Brain\nfrom uniform_experience_replay import Memory as UER\nfrom prioritized_experience_replay import Memory as PER\n\nMAX_EPSILON = 1.0\nMIN_EPSILON = 0.01\n\nMIN_BETA = 0.4\nMAX_BETA = 1.0\n\n\nclass Agent(object):\n\n epsilon = MAX_EPSILON\n beta = MIN_BETA\n\n def __init__(self, state_size, action_size, bee_index, brain_name, arguments):\n self.state_size = state_size\n self.action_size = action_size\n self.bee_index = bee_index\n self.learning_rate = arguments['learning_rate']\n self.gamma = 0.95\n self.brain = Brain(self.state_size, self.action_size, brain_name, arguments)\n self.memory_model = arguments['memory']\n\n if self.memory_model == 'UER':\n self.memory = UER(arguments['memory_capacity'])\n\n elif self.memory_model == 'PER':\n self.memory = PER(arguments['memory_capacity'], arguments['prioritization_scale'])\n\n else:\n print('Invalid memory model!')\n\n self.target_type = arguments['target_type']\n self.update_target_frequency = arguments['target_frequency']\n self.max_exploration_step = arguments['maximum_exploration']\n self.batch_size = arguments['batch_size']\n self.step = 0\n self.test = arguments['test']\n self.poison_a = arguments['poison_mode']\n if self.test:\n self.epsilon = MIN_EPSILON\n\n def greedy_actor(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n else:\n return np.argmax(self.brain.predict_one_sample(state))\n\n def pre_greedy_actor(self, state):\n if np.random.rand() <= 0.01:\n return random.randrange(self.action_size)\n else:\n return np.argmax(self.brain.predict_one_sample(state))\n\n def poison_actor(self, state):\n if self.poison_a == 0:\n return random.randrange(self.action_size)\n elif self.poison_a == 1:\n # return 4\n return 4\n elif self.poison_a == 2:\n return np.argmin(self.brain.predict_one_sample(state))\n\n def find_targets_per(self, batch):\n batch_len = len(batch)\n\n states = np.array([o[1][0] for o in batch])\n states_ = np.array([o[1][3] for o in batch])\n\n p = self.brain.predict(states)\n p_ = self.brain.predict(states_)\n pTarget_ = self.brain.predict(states_, target=True)\n\n x = np.zeros((batch_len, self.state_size))\n y = np.zeros((batch_len, self.action_size))\n errors = np.zeros(batch_len)\n\n for i in range(batch_len):\n o = batch[i][1]\n s = o[0]\n a = o[1][self.bee_index]\n r = o[2]\n s_ = o[3]\n done = o[4]\n\n t = p[i]\n old_value = t[a]\n if done:\n t[a] = r\n else:\n if self.target_type == 'DDQN':\n t[a] = r + self.gamma * pTarget_[i][np.argmax(p_[i])]\n elif self.target_type == 'DQN':\n t[a] = r + self.gamma * np.amax(pTarget_[i])\n else:\n print('Invalid type for target network!')\n\n x[i] = s\n y[i] = t\n errors[i] = np.abs(t[a] - old_value)\n\n return [x, y, errors]\n\n def find_targets_uer(self, batch):\n batch_len = len(batch)\n\n states = np.array([o[0] for o in batch])\n states_ = np.array([o[3] for o in batch])\n\n p = self.brain.predict(states)\n p_ = self.brain.predict(states_)\n pTarget_ = self.brain.predict(states_, target=True)\n\n x = np.zeros((batch_len, self.state_size))\n y = np.zeros((batch_len, self.action_size))\n errors = np.zeros(batch_len)\n\n for i in range(batch_len):\n o = batch[i]\n s = o[0]\n a = o[1][self.bee_index]\n r = o[2]\n s_ = o[3]\n done = o[4]\n\n t = p[i]\n old_value = t[a]\n if done:\n t[a] = r\n else:\n if 
self.target_type == 'DDQN':\n t[a] = r + self.gamma * pTarget_[i][np.argmax(p_[i])]\n elif self.target_type == 'DQN':\n t[a] = r + self.gamma * np.amax(pTarget_[i])\n else:\n print('Invalid type for target network!')\n\n x[i] = s\n y[i] = t\n errors[i] = np.abs(t[a] - old_value)\n\n return [x, y]\n\n def observe(self, sample):\n\n if self.memory_model == 'UER':\n self.memory.remember(sample)\n\n elif self.memory_model == 'PER':\n _, _, errors = self.find_targets_per([[0, sample]])\n self.memory.remember(sample, errors[0])\n\n else:\n print('Invalid memory model!')\n\n def decay_epsilon(self):\n # slowly decrease Epsilon based on our experience\n self.step += 1\n\n if self.test:\n self.epsilon = MIN_EPSILON\n self.beta = MAX_BETA\n else:\n if self.step < self.max_exploration_step:\n self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * (self.max_exploration_step - self.step)/self.max_exploration_step\n self.beta = MAX_BETA + (MIN_BETA - MAX_BETA) * (self.max_exploration_step - self.step)/self.max_exploration_step\n else:\n self.epsilon = MIN_EPSILON\n\n def replay(self):\n\n if self.memory_model == 'UER':\n batch = self.memory.sample(self.batch_size)\n x, y = self.find_targets_uer(batch)\n self.brain.train(x, y)\n\n elif self.memory_model == 'PER':\n [batch, batch_indices, batch_priorities] = self.memory.sample(self.batch_size)\n x, y, errors = self.find_targets_per(batch)\n\n normalized_batch_priorities = [float(i) / sum(batch_priorities) for i in batch_priorities]\n importance_sampling_weights = [(self.batch_size * i) ** (-1 * self.beta)\n for i in normalized_batch_priorities]\n normalized_importance_sampling_weights = [float(i) / max(importance_sampling_weights)\n for i in importance_sampling_weights]\n sample_weights = [errors[i] * normalized_importance_sampling_weights[i] for i in range(len(errors))]\n\n self.brain.train(x, y, np.array(sample_weights))\n\n self.memory.update(batch_indices, errors)\n\n else:\n print('Invalid memory model!')\n\n def grad_noise(self, state):\n\n state = state.reshape(1, self.state_size)\n state = state.astype(float)\n epsilon = self.brain.grad_model(state)\n return epsilon\n\n\n\n\n def update_target_model(self):\n if self.step % self.update_target_frequency == 0:\n self.brain.update_target_model()"
] | [
[
"numpy.array",
"numpy.random.rand",
"numpy.zeros",
"numpy.amax",
"numpy.argmax",
"numpy.abs"
]
] |
gunhoo/Drone-Tracking | [
"fcb56dde7af18d36b306873b626a86520e34aab9"
] | [
"rasp based system/dense_main.py"
] | [
"import glob\nimport sys\nimport pyaudio\nimport wave\nimport numpy as np\nimport tensorflow as tf\nimport librosa\nfrom socket import *\nfrom header import *\n\nif len(sys.argv) < 4:\n print(\"Compile error : python main.py [nodeNum] [posX] [posY]\")\n exit(1)\n\nFORMAT = pyaudio.paInt16\nNODE = sys.argv[1]\nposX = sys.argv[2]\nposY = sys.argv[3]\n\n# connection\nclientSocket = socket(AF_INET, SOCK_STREAM)\ntry:\n clientSocket.connect(('192.168.123.3',21535))\nexcept Exception as e:\n print('cannot connect to the server;', e)\n exit()\n\n# open pyaudio\np = pyaudio.PyAudio()\nstream = p.open(format = FORMAT,\n channels = CHANNELS,\n rate = RATE,\n input = True,frames_per_buffer = CHUNK,\n #input_device_index = 0,\n #output_device_index = 0)\n )\n\n# start loop\nprint(\"Start recording...\")\nwhile True:\n try:\n # initailize values\n printer(\"Start\")\n sess = tf.Session()\n init = tf.global_variables_initializer()\n tf.reset_default_graph()\n sess.run(init)\n frames = []\n\n # recording\n for i in range(0, int(RATE/CHUNK*RECORD_SECONDS)):\n data = stream.read(CHUNK, exception_on_overflow=False)\n frames.append(data)\n printer(\"Record\")\n # record/laod wav files\n file_saver(frames, wave, p)\n files = glob.glob(path)\n raw_data = load(files)\n printer(\"I/O\")\n\n # pre-processing\n mfcc_data, y = mfcc4(raw_data, 1)\n printer(\"MFCC\")\n X = np.concatenate((mfcc_data), axis=0)\n X = X.reshape(-1, N_MFCC, N_FRAME, CHANNELS)\n X_input = X.reshape(X.shape[0],-1)\n X = tf.placeholder(tf.float32, shape=[None,N_MFCC*N_FRAME*CHANNELS])\n keep_prob = tf.placeholder(tf.float32)\n\n # Dense layer\n logits = dens(X, keep_prob)\n y = np.hstack(y)\n n_labels = y.shape[0]\n y_encoded = np.zeros((n_labels, N_UNIQ_LABELS))\n y_encoded[np.arange(n_labels),y] = 1\n Y = tf.placeholder(tf.float32, shape=[None, N_UNIQ_LABELS])\n printer(\"layer\")\n # cost optimizer needed??? -> time consuming\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)\n printer(\"cost-optimizer\")\n\n # model saver\n sess = tf.Session()\n saver = tf.train.Saver()\n saver.restore(sess, './model/Dense/dense_model')\n printer(\"Model saver\")\n\n # prediction\n y_pred = sess.run(tf.argmax(logits,1),feed_dict={X:X_input,keep_prob:1})\n #y_true = sess.run(tf.argmax(y_encoded,1))\n from sklearn.metrics import accuracy_score\n result = \"%d\" %((accuracy_score(y, y_pred)*100)%100)\n printer(result)\n\n ### send packet\n message = NODE + \":\" + str(result) + \":\" + posX + \":\" + posY\n clientSocket.send(message.encode())\n printer(\"TCP\")\n # exception handle\n except KeyboardInterrupt:\n print(\"wait seconds to terminate...\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n clientSocket.close()\n break\n"
] | [
[
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.argmax",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"sklearn.metrics.accuracy_score",
"tensorflow.placeholder",
"numpy.arange",
"numpy.hstack",
"tensorflow.global_variables_initializer",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2"
]
] |
Blosc/bloscpack | [
"5efdadf5b6f61e995df1817943afb9629ce28c89"
] | [
"test/test_numpy_io.py"
] | [
"# -*- coding: utf-8 -*-\n# vim :set ft=py:\n\n\nimport numpy as np\nimport numpy.testing as npt\nfrom unittest import mock\nimport pytest\n\n\nfrom bloscpack.abstract_io import (pack,\n )\nfrom bloscpack.args import (BloscArgs,\n calculate_nchunks,\n )\nfrom bloscpack.compat_util import StringIO\nfrom bloscpack.exceptions import (NotANumpyArray,\n ChunkSizeTypeSizeMismatch,\n ObjectNumpyArrayRejection,\n )\nfrom bloscpack.file_io import (PlainFPSource,\n CompressedFPSource,\n CompressedFPSink,\n )\nfrom bloscpack.headers import (decode_blosc_header,\n )\nfrom bloscpack.memory_io import CompressedMemorySource, CompressedMemorySink\nfrom bloscpack.numpy_io import (pack_ndarray,\n unpack_ndarray,\n pack_ndarray_to_bytes,\n unpack_ndarray_from_bytes,\n pack_ndarray_to_file,\n unpack_ndarray_from_file,\n _conv,\n )\nfrom bloscpack.testutil import (create_tmp_files,\n )\n\n\ndef roundtrip_numpy_memory(ndarray):\n sink = CompressedMemorySink()\n pack_ndarray(ndarray, sink)\n source = CompressedMemorySource(sink)\n b = unpack_ndarray(source)\n return npt.assert_array_equal, ndarray, b\n\n\ndef roundtrip_numpy_str(ndarray):\n s = pack_ndarray_to_bytes(ndarray)\n b = unpack_ndarray_from_bytes(s)\n return npt.assert_array_equal, ndarray, b\n\n\ndef roundtrip_numpy_file_pointers(ndarray):\n sio = StringIO()\n sink = CompressedFPSink(sio)\n pack_ndarray(ndarray, sink)\n sio.seek(0)\n source = CompressedFPSource(sio)\n b = unpack_ndarray(source)\n return npt.assert_array_equal, ndarray, b\n\n\ndef roundtrip_numpy_file(ndarray):\n with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):\n pack_ndarray_to_file(ndarray, out_file)\n b = unpack_ndarray_from_file(out_file)\n return npt.assert_array_equal, ndarray, b\n\n\ndef test_conv():\n test_data = (\n ([[u'a', u'f8']], [('a', 'f8')]),\n ([[u'a', u'f8', 2]], [('a', 'f8', 2)]),\n ([[u'a', [[u'b', 'f8']]]], [('a', [('b', 'f8')])]),\n )\n for input_, expected in test_data:\n received = _conv(input_)\n assert expected == received\n\n\ndef test_unpack_exception():\n a = np.arange(50)\n sio = StringIO()\n a_str = a.tobytes()\n source = PlainFPSource(StringIO(a_str))\n sink = CompressedFPSink(sio)\n pack(source, sink, *calculate_nchunks(len(a_str)))\n with pytest.raises(NotANumpyArray):\n unpack_ndarray_from_bytes(sio.getvalue())\n\n\ndef roundtrip_ndarray(a):\n roundtrip_numpy_memory(a)\n roundtrip_numpy_str(a)\n roundtrip_numpy_file_pointers(a)\n roundtrip_numpy_file(a)\n\n\ndef test_numpy_dtypes_shapes_order():\n\n # happy trail\n a = np.arange(50)\n roundtrip_ndarray(a)\n\n for dt in np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:\n a = np.arange(64, dtype=dt)\n roundtrip_ndarray(a)\n a = a.copy().reshape(8, 8)\n roundtrip_ndarray(a)\n a = a.copy().reshape(4, 16)\n roundtrip_ndarray(a)\n a = a.copy().reshape(4, 4, 4)\n roundtrip_ndarray(a)\n a = np.asfortranarray(a)\n assert np.isfortran(a)\n roundtrip_ndarray(a)\n\n # Fixed width string arrays\n a = np.array(['abc', 'def', 'ghi'])\n roundtrip_ndarray(a)\n\n # This actually get's cast to a fixed width string array\n a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')])\n roundtrip_ndarray(a)\n\n ## object arrays\n #a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')], dtype='object')\n #for case in roundtrip_ndarray(a):\n # case()\n\n # structured array\n a = np.array([('a', 1), ('b', 2)], dtype=[('a', 'S1'), ('b', 'f8')])\n roundtrip_ndarray(a)\n\n # record array\n a = np.array([(1, 'O', 1)],\n dtype=np.dtype([('step', 'int32'),\n ('symbol', '|S1'),\n ('index', 'int32')]))\n 
roundtrip_ndarray(a)\n\n # and a nested record array\n dt = [('year', '<i4'),\n ('countries', [('c1', [('iso', 'a3'), ('value', '<f4')]),\n ('c2', [('iso', 'a3'), ('value', '<f4')])\n ])\n ]\n a = np.array([(2009, (('USA', 10.),\n ('CHN', 12.))),\n (2010, (('BRA', 10.),\n ('ARG', 12.)))],\n dt)\n roundtrip_ndarray(a)\n\n # what about endianess\n a = np.arange(10, dtype='>i8')\n roundtrip_ndarray(a)\n\n # empty array\n a = np.array([], dtype='f8')\n roundtrip_ndarray(a)\n\n\ndef test_reject_object_array():\n a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')], dtype='object')\n with pytest.raises(ObjectNumpyArrayRejection):\n roundtrip_ndarray(a)\n\n\ndef test_reject_nested_object_array():\n a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')],\n dtype=[('a', int), ('b', 'object')])\n with pytest.raises(ObjectNumpyArrayRejection):\n roundtrip_ndarray(a)\n\ndef test_backwards_compat():\n\n def old_ndarray_meta(ndarray):\n # This DOESN'T use 'repr', see also:\n # bloscpack.numpy_io._ndarray_meta\n return {'dtype': ndarray.dtype.descr\n if ndarray.dtype.names is not None\n else ndarray.dtype.str,\n 'shape': ndarray.shape,\n 'order': 'F' if np.isfortran(ndarray) else 'C',\n 'container': 'numpy',\n }\n test_data = [np.arange(10),\n np.array([('a', 1), ('b', 2)],\n dtype=[('a', 'S1'), ('b', 'f8')]),\n ]\n\n with mock.patch('bloscpack.numpy_io._ndarray_meta', old_ndarray_meta):\n for a in test_data:\n # uses old version of _ndarray_meta\n c = pack_ndarray_to_bytes(a)\n # should not raise a SyntaxError\n d = unpack_ndarray_from_bytes(c)\n npt.assert_array_equal(a, d)\n\n\ndef test_itemsize_chunk_size_mismatch():\n a = np.arange(1000)\n # typesize of the array is 8, let's glitch the typesize\n for i in [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]:\n with pytest.raises(ChunkSizeTypeSizeMismatch):\n pack_ndarray_to_bytes(a, i)\n\n\ndef test_larger_arrays():\n for dt in ('uint64', 'int64', 'float64'):\n a = np.arange(2e4, dtype=dt)\n roundtrip_ndarray(a)\n\n\ndef huge_arrays():\n for dt in ('uint64', 'int64', 'float64'):\n # needs plenty of memory\n a = np.arange(1e8, dtype=dt)\n roundtrip_ndarray(a)\n\n\ndef test_alternate_cname():\n for cname, int_id in [\n ('blosclz', 0),\n ('lz4', 1),\n ('lz4hc', 1),\n ('zlib', 3),\n ('zstd', 4),\n ]:\n blosc_args = BloscArgs(cname=cname)\n array_ = np.linspace(0, 1, int(2e6))\n sink = CompressedMemorySink()\n pack_ndarray(array_, sink, blosc_args=blosc_args)\n blosc_header = decode_blosc_header(sink.chunks[0])\n assert blosc_header['flags'] >> 5 == int_id\n\n\ndef test_typesize_is_set_correctly_with_default_blosc_args():\n a = np.array([1, 2, 3], dtype='uint8')\n sink = CompressedMemorySink()\n pack_ndarray(a, sink)\n expected_args = BloscArgs(typesize=1)\n assert expected_args == sink.blosc_args\n\n\ndef test_typesize_is_set_correctly_with_custom_blosc_args():\n a = np.array([1, 2, 3], dtype='uint8')\n sink = CompressedMemorySink()\n input_args = BloscArgs(clevel=9)\n pack_ndarray(a, sink, blosc_args=input_args)\n expected_args = BloscArgs(clevel=9, typesize=1)\n assert expected_args == sink.blosc_args\n\n\ndef test_roundtrip_slice():\n a = np.arange(100).reshape((10, 10))\n s = a[3:5, 3:5]\n roundtrip_ndarray(s)\n"
] | [
[
"numpy.array",
"numpy.isfortran",
"numpy.testing.assert_array_equal",
"numpy.asfortranarray",
"numpy.arange",
"numpy.dtype"
]
] |
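A minimal sketch of the bytes-roundtrip pattern this test module exercises, assuming bloscpack and numpy are installed; it uses only pack_ndarray_to_bytes / unpack_ndarray_from_bytes and numpy.testing.assert_array_equal, all of which already appear in the file above.

import numpy as np
import numpy.testing as npt
from bloscpack.numpy_io import pack_ndarray_to_bytes, unpack_ndarray_from_bytes

def check_roundtrip(ndarray):
    # Compress the array into an in-memory bloscpack byte string ...
    packed = pack_ndarray_to_bytes(ndarray)
    # ... then decompress and confirm shape and contents survive unchanged.
    restored = unpack_ndarray_from_bytes(packed)
    npt.assert_array_equal(ndarray, restored)
    return restored

if __name__ == '__main__':
    check_roundtrip(np.arange(64, dtype='float64').reshape(8, 8))
    check_roundtrip(np.asfortranarray(np.arange(16).reshape(4, 4)))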
LovecraftianHorror/rust_text_classifier | [
"d062ebf1da2d593a7261bba970642bcc9bd93a06"
] | [
"lib/classifier/__init__.py"
] | [
"from __future__ import annotations\n\nimport pickle\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nfrom numpy import float64\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\n\nfrom lib.classifier.datasets import Category, Posts, PostsLoader\n\n\ndef score_classifier(\n *,\n corpus_path: Path,\n training_percentage: float = 0.8,\n) -> List[Tuple[Category, Category, float]]:\n if training_percentage > 1.0 or training_percentage < 0.0:\n raise ValueError(\"Percentage is represented as a float between 0.0 and 1.0\")\n\n loader = PostsLoader(corpus_path)\n\n num_training_vals = int(loader.num_entries() * training_percentage)\n training_set = loader.take(num_training_vals)\n test_set = loader.take()\n\n classifier = TextClassifier.from_training(training_set)\n predictions = classifier.predict_set(test_set.as_data())\n\n results: List[Tuple[Category, Category, float]] = []\n\n # Gather the results for the predictions\n for ((pred_category, pred_prob), (real_category, text)) in zip(\n predictions, test_set.category_post_pairs\n ):\n results.append((real_category, pred_category, float(pred_prob)))\n\n return results\n\n\nclass TextClassifier:\n categories: List[Category]\n classifier: GridSearchCV\n\n def __init__(self, categories: List[Category], classifier: GridSearchCV) -> None:\n self.categories = list(categories)\n self.classifier = classifier\n\n @classmethod\n def from_cache_file_else_train(\n cls, *, cache_path: Path, corpus_path: Path\n ) -> TextClassifier:\n try:\n classifier = TextClassifier.from_cache_file(cache_path)\n except FileNotFoundError:\n loader = PostsLoader(corpus_path)\n training_set = loader.take()\n classifier = TextClassifier.from_training(training_set)\n\n with cache_path.open(\"wb\") as to_pickle:\n cache_path.parent.mkdir(exist_ok=True, parents=True)\n pickle.dump(classifier.classifier, to_pickle)\n\n return classifier\n\n @classmethod\n def from_cache_file(cls, cache_path: Path) -> TextClassifier:\n with cache_path.open(\"rb\") as pickled:\n grid_search_classifier = pickle.load(pickled)\n\n return cls(categories=list(Category), classifier=grid_search_classifier)\n\n @classmethod\n def from_training(cls, training_set: Posts) -> TextClassifier:\n # Setup a pipeline for the classifier\n # - Generates feature vectors using a count vectorizer\n # - Determines term frequency inverse document frequency\n # - Classifies using a linear SVM\n classifier_pipeline = Pipeline(\n [\n (\"frequency_vectorizer\", TfidfVectorizer(stop_words=\"english\")),\n (\n \"classifier\",\n SGDClassifier(\n penalty=\"l2\",\n tol=None,\n ),\n ),\n ]\n )\n\n # Select optimal pipeline parameters using grid search\n parameters = {\n \"frequency_vectorizer__ngram_range\": [(1, 1), (1, 2)],\n \"frequency_vectorizer__use_idf\": (True, False),\n \"classifier__alpha\": (1e-2, 1e-3),\n # These are the loss fuctions that support `predict_proba`\n # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier.predict_proba\n \"classifier__loss\": (\"log\", \"modified_huber\"),\n }\n\n classifier = GridSearchCV(classifier_pipeline, parameters, cv=5, n_jobs=-1)\n classifier = classifier.fit(**training_set.as_data_target_kwargs())\n\n return cls(categories=list(Category), classifier=classifier)\n\n def predict(self, text: str) -> Tuple[Category, float64]:\n category = 
self.categories[self.classifier.predict([text])[0]]\n probabilities = self.classifier.predict_proba([text])[0]\n\n return category, max(probabilities)\n\n def predict_set(self, texts: List[str]) -> List[Tuple[Category, float64]]:\n results = []\n for text in texts:\n results.append(self.predict(text))\n\n return results\n\n def score(self, test_set: Posts) -> float64:\n return self.classifier.score(**test_set.as_data_target_kwargs())\n"
] | [
[
"sklearn.model_selection.GridSearchCV",
"sklearn.linear_model.SGDClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
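A toy-corpus sketch of the Pipeline + GridSearchCV pattern used in TextClassifier.from_training above; the corpus, labels and the reduced parameter grid are invented for illustration, and 'modified_huber' is used because it is one of the losses the original grid names as supporting predict_proba.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

texts = ["the cat sat on the mat", "a hat for a cat", "cats purr softly", "my cat naps",
         "dogs bark loudly", "the dog chased a ball", "my dog sleeps", "loud dog barking"]
labels = [0, 0, 0, 0, 1, 1, 1, 1]

pipeline = Pipeline([
    ("frequency_vectorizer", TfidfVectorizer(stop_words="english")),
    ("classifier", SGDClassifier(loss="modified_huber", penalty="l2", tol=None)),
])

# A reduced version of the parameter grid searched in from_training().
parameters = {
    "frequency_vectorizer__ngram_range": [(1, 1), (1, 2)],
    "classifier__alpha": (1e-2, 1e-3),
}

search = GridSearchCV(pipeline, parameters, cv=2, n_jobs=-1)
search.fit(texts, labels)
print(search.best_params_)
print(search.predict_proba(["a sleepy cat on a mat"])[0])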
kishorekolli/deep_racer_guru | [
"0a0a56103f395f958e8177ee0bd5ae1481f93d98"
] | [
"src/analyze/util/heatmap.py"
] | [
"#\n# DeepRacer Guru\n#\n# Version 3.0 onwards\n#\n# Copyright (c) 2021 dmh23\n#\nimport math\nimport typing\nimport numpy as np\n\nfrom src.graphics.track_graphics import TrackGraphics\nfrom src.utils.colors import get_color_for_data, ColorPalette\n\n\nclass HeatMap:\n #\n # PUBLIC interface\n #\n\n def __init__(self, min_x, min_y, max_x, max_y, granularity, allow_repeats: bool):\n self.min_x = min_x\n self.min_y = min_y\n self.max_x = max_x\n self.max_y = max_y\n self._allow_repeats = allow_repeats\n\n self._granularity = granularity\n\n x_size = self._get_x_index(max_x) + 1\n y_size = self._get_y_index(max_y) + 1\n\n self._stats = [[[] for _ in range(x_size)] for _ in range(y_size)]\n self._last_visitor = [[None] * x_size for _ in range(y_size)]\n\n def visit(self, x, y, visitor, stat: typing.Union[float, int]):\n x_index = self._get_x_index(x)\n y_index = self._get_y_index(y)\n\n if self._allow_repeats or visitor != self._last_visitor[y_index][x_index]:\n self._last_visitor[y_index][x_index] = visitor\n self._stats[y_index][x_index].append(float(stat))\n\n def get_visits_and_scope_range(self, brightness: int):\n assert brightness in [-1, 0, 1, 2]\n\n (visits, _, max_visits) = self._get_stats_array(np.count_nonzero)\n if max_visits == 0:\n return None, None, None\n\n min_visits = max_visits / 10\n\n if brightness == 1:\n min_visits /= 2\n elif brightness == 2:\n min_visits /= 3.5\n elif brightness == -1:\n min_visits *= 1.5\n\n return visits, min_visits, max_visits\n\n def draw_visits(self, track_graphics: TrackGraphics, brightness: int, color_palette: ColorPalette):\n assert brightness in [-1, 0, 1, 2]\n\n visits, min_visits, max_visits = self.get_visits_and_scope_range(brightness)\n\n if not visits:\n return\n\n colour_multiplier = 255 / max_visits / max_visits * 2\n\n if brightness == 1:\n colour_multiplier *= 2\n elif brightness == 2:\n colour_multiplier *= 3.5\n elif brightness == -1:\n colour_multiplier /= 2\n\n for yy, visits in enumerate(visits):\n for xx, visit in enumerate(visits):\n if visit >= min_visits:\n x = self.min_x + self._granularity * xx\n y = self.min_y + self._granularity * yy\n\n data = min(1.0, 30/255 + colour_multiplier / 255 * visit * visit)\n colour = get_color_for_data(data, color_palette)\n track_graphics.plot_box(x, y, x + self._granularity, y + self._granularity, colour)\n\n # NEW way - heatmap itself is given the standard brightness calculation\n def draw_brightness_statistic(self, track_graphics: TrackGraphics, adjust_brightness: int,\n color_palette: ColorPalette, visits_heatmap):\n assert adjust_brightness in [-1, 0, 1, 2]\n\n if adjust_brightness == 1:\n multiplier = 1.1\n elif adjust_brightness == 2:\n multiplier = 1.2\n elif adjust_brightness == -1:\n multiplier = 0.9\n else:\n multiplier = 1.0\n\n (stats, _, _) = self._get_stats_array(np.median, adjust_brightness, visits_heatmap)\n\n for yy, stats in enumerate(stats):\n for xx, stat in enumerate(stats):\n if not math.isnan(stat):\n x = self.min_x + self._granularity * xx\n y = self.min_y + self._granularity * yy\n colour = get_color_for_data(max(0.1, min(1, stat * multiplier)), color_palette)\n track_graphics.plot_box(x, y, x + self._granularity, y + self._granularity, colour)\n\n # Old way - heatmap contains the stats\n def draw_statistic(self, track_graphics: TrackGraphics, brightness: int, color_palette: ColorPalette, visits_heatmap,\n forced_max_stat=-1, forced_min_stat=-1):\n assert brightness in [-1, 0, 1, 2]\n\n (stats, min_stat, max_stat) = self._get_stats_array(np.median, brightness, 
visits_heatmap)\n if max_stat == 0:\n return\n\n if forced_max_stat > 0:\n max_stat = forced_max_stat\n if forced_min_stat > 0:\n min_stat = forced_min_stat\n\n if brightness == 1:\n max_stat *= 0.93\n elif brightness == 2:\n max_stat *= 0.85\n min_stat *= 0.95\n elif brightness == -1:\n min_stat *= 1.1\n\n if min_stat >= max_stat:\n min_stat = 0.99 * max_stat\n\n stat_range = max_stat - min_stat\n\n for yy, stats in enumerate(stats):\n for xx, stat in enumerate(stats):\n if not math.isnan(stat):\n x = self.min_x + self._granularity * xx\n y = self.min_y + self._granularity * yy\n\n gap_from_best = max_stat - stat\n data = max(0.1, min(1, 1 - 0.9 * gap_from_best / stat_range))\n colour = get_color_for_data(data, color_palette)\n track_graphics.plot_box(x, y, x + self._granularity, y + self._granularity, colour)\n\n #\n # PRIVATE implementation\n #\n\n def _get_x_index(self, value):\n value = max(min(value, self.max_x), self.min_x)\n return round((value - self.min_x - self._granularity / 2) / self._granularity)\n\n def _get_y_index(self, value):\n value = max(min(value, self.max_y), self.min_y)\n return round((value - self.min_y - self._granularity / 2) / self._granularity)\n\n def _get_stats_count(self):\n count = 0\n for y_stats in self._stats:\n for x_stats in y_stats:\n if x_stats:\n count = max(count, len(x_stats))\n return count\n\n def _get_stats_array(self, stat_method: callable, brightness: int = 0, visits_heatmap = None):\n if visits_heatmap:\n visits, min_visits, _ = visits_heatmap.get_visits_and_scope_range(brightness)\n else:\n visits, min_visits = (None, None)\n\n min_value = math.nan\n max_value = 0.0\n new_stats = []\n for yy, y_stats in enumerate(self._stats):\n new_y_stats = []\n for xx, x_stats in enumerate(y_stats):\n stat = math.nan\n if x_stats:\n if not visits_heatmap or visits[yy][xx] >= min_visits:\n stat = stat_method(np.array(x_stats))\n min_value = min(stat, min_value)\n max_value = max(stat, max_value)\n new_y_stats.append(stat)\n new_stats.append(new_y_stats)\n return new_stats, min_value, max_value\n\n def print_debug(self):\n for v in reversed(self._get_stats_array(np.sum)):\n s = \"\"\n for w in v:\n if w == 0:\n s += \" \"\n else:\n s += str(round(w)) + \" \"\n print(s)\n\n\n\n\ndef test_it():\n map = HeatMap(1, 1, 5.99, 7.99, 0.5, True)\n\n print(map._get_x_index(1))\n print(map._get_x_index(1.24))\n print(map._get_x_index(1.25))\n print(map._get_x_index(1.49))\n print(map._get_x_index(1.51))\n\n print(\"-------------------------\")\n\n map.visit(1, 1, \"aaa\", 1)\n map.visit(6, 7, \"bbb\", 1)\n map.visit(6, 7, \"ccc\", 1)\n\n # map.print_debug()\n\n map.visit(5.9, 6.9, \"zzz\", 1)\n\n map.visit(1.26, 1.26, \"a\", 1)\n map.visit(1.4, 1.4, \"b\", 1)\n map.visit(1.6, 1.6, \"c\", 1)\n map.visit(1.8, 1.8, \"d\", 1)\n\n map.visit(3, 3, \"d\", 1)\n map.visit(4, 4, \"d\", 1)\n map.visit(5, 5, \"d\", 1)\n\n map.print_debug()\n\n print(\"=============\")\n print(map._stats[0])\n\n print(map._get_stats_array(np.sum))\n\n\n\n# RUN TEST\n# test_it()\n\n\n\n\n\n\n"
] | [
[
"numpy.array"
]
] |
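A small self-contained sketch of the binning and per-cell aggregation the HeatMap class above performs: the cell index uses the same clamp-and-round arithmetic as _get_x_index / _get_y_index, and each cell's visit statistics are reduced with np.median as in _get_stats_array(np.median). The sample visits are invented for illustration.

import numpy as np

def cell_index(value, min_value, max_value, granularity):
    # Clamp into the map's range, then snap to the nearest cell of width `granularity`
    # (the same arithmetic as HeatMap._get_x_index / _get_y_index).
    value = max(min(value, max_value), min_value)
    return round((value - min_value - granularity / 2) / granularity)

# Accumulate statistics per 0.5-wide cell for a few synthetic (x, stat) visits.
stats_per_cell = {}
for x, stat in [(1.0, 2.0), (1.1, 4.0), (1.6, 3.0), (2.4, 5.0)]:
    idx = cell_index(x, 1.0, 5.99, 0.5)
    stats_per_cell.setdefault(idx, []).append(stat)

# Reduce each cell to a single value, mirroring _get_stats_array(np.median).
medians = {idx: float(np.median(np.array(values))) for idx, values in stats_per_cell.items()}
print(medians)  # e.g. {0: 3.0, 1: 3.0, 2: 5.0}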
chakkritte/EEEA-Net | [
"260c2a5c673a806315fc5b529b9c9112c48ca8ae"
] | [
"Transfer/Simplebaseline/lib/models/backbones/ResNet.py"
] | [
"import torch\nimport torch.nn as nn\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, 
replace_stride_with_dilation=None, norm_layer=None):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) # 1/2\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # 1/4\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]) # 1/8\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]) # 1/16\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]) # 1/32\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.channels = [128*block.expansion, 256*block.expansion, 512 * block.expansion]\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion),)\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n feats = []\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n feats.append(x)\n x = self.layer3(x)\n feats.append(x)\n x = self.layer4(x)\n feats.append(x)\n if self.fc is not None:\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n return x\n else:\n return feats\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\ndef resnet18(weight_path=None, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef 
resnet34(weight_path=None, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(weight_path=None, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(weight_path=None, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(weight_path=None, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\nif __name__ == '__main__':\n model = resnet18()\n model = model.cuda()\n x = torch.rand(1, 3, 224, 224).cuda()\n y = model(x)\n print(model.channels)"
] | [
[
"torch.nn.Linear",
"torch.rand",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
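A short sketch of how this backbone is meant to be used, following the __main__ block above but on CPU; the import path is a placeholder and would need to match wherever ResNet.py lives in a given project.

import torch
from ResNet import resnet18  # hypothetical module path; adjust to the actual location

model = resnet18(num_classes=1000)
model.eval()
x = torch.rand(1, 3, 224, 224)

with torch.no_grad():
    # With the default fc head, the forward pass returns classification logits.
    logits = model(x)
    print(logits.shape)  # torch.Size([1, 1000])

    # Dropping fc switches _forward_impl to backbone mode: it returns the feature maps
    # collected after layer2, layer3 and layer4 (1/8, 1/16 and 1/32 resolution).
    model.fc = None
    feats = model(x)
    print([f.shape[1] for f in feats])  # [128, 256, 512], matching model.channels
    print(model.channels)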
changleilei/bert4keras | [
"48626f05ace6559c5c318a2f453c2296ababa8cd"
] | [
"bert4keras/layers.py"
] | [
"#! -*- coding: utf-8 -*-\n# 自定义层\n\nimport numpy as np\nimport tensorflow as tf\nfrom bert4keras.backend import keras, K, is_tf_keras\nfrom bert4keras.backend import sequence_masking\nfrom bert4keras.backend import recompute_grad\nfrom keras import initializers, activations\nfrom keras.layers import *\n\n\ndef integerize_shape(func):\n \"\"\"装饰器,保证input_shape一定是int或None\n \"\"\"\n def convert(item):\n if hasattr(item, '__iter__'):\n return [convert(i) for i in item]\n elif hasattr(item, 'value'):\n return item.value\n else:\n return item\n\n def new_func(self, input_shape):\n input_shape = convert(input_shape)\n return func(self, input_shape)\n\n return new_func\n\n\nif (not is_tf_keras) and keras.__version__ < '2.3':\n\n class Layer(keras.layers.Layer):\n \"\"\"重新定义Layer,赋予“层中层”功能\n (仅keras 2.3以下版本需要)\n \"\"\"\n def __init__(self, **kwargs):\n super(Layer, self).__init__(**kwargs)\n self.supports_masking = True # 本项目的自定义层均可mask\n\n def __setattr__(self, name, value):\n if isinstance(value, keras.layers.Layer):\n if not hasattr(self, '_layers'):\n self._layers = []\n if value not in self._layers:\n self._layers.append(value)\n super(Layer, self).__setattr__(name, value)\n\n @property\n def trainable_weights(self):\n trainable = getattr(self, 'trainable', True)\n if trainable:\n trainable_weights = super(Layer, self).trainable_weights[:]\n for l in getattr(self, '_layers', []):\n trainable_weights += l.trainable_weights\n return trainable_weights\n else:\n return []\n\n @property\n def non_trainable_weights(self):\n trainable = getattr(self, 'trainable', True)\n non_trainable_weights = super(Layer, self).non_trainable_weights[:]\n for l in getattr(self, '_layers', []):\n if trainable:\n non_trainable_weights += l.non_trainable_weights\n else:\n non_trainable_weights += l.weights\n return non_trainable_weights\n\n if keras.__version__ < '2.2.5':\n\n import inspect\n\n class Model(keras.models.Model):\n \"\"\"重新定义Model,整合fit和fit_generator\n \"\"\"\n def fit(self, x=None, *args, **kwargs):\n if inspect.isgenerator(x):\n return self.fit_generator(x, *args, **kwargs)\n else:\n return super(Model, self).fit(x, *args, **kwargs)\n\n keras.models.Model = Model\n\nelse:\n\n class Layer(keras.layers.Layer):\n def __init__(self, **kwargs):\n super(Layer, self).__init__(**kwargs)\n self.supports_masking = True # 本项目的自定义层均可mask\n\n\nif (not is_tf_keras) or tf.__version__ < '1.15':\n\n if not is_tf_keras:\n NodeBase = keras.engine.base_layer.Node\n else:\n from tensorflow.python.keras.engine import base_layer\n NodeBase = base_layer.Node\n\n class Node(NodeBase):\n \"\"\"修改Node来修复keras下孪生网络的bug\n 注意:这是keras的bug,并不是bert4keras的bug,但keras已经不更新了,\n 所以只好在这里进行修改。tf 1.15+自带的keras已经修改了这个\n bug。\n \"\"\"\n @property\n def arguments(self):\n return self._arguments.copy()\n\n @arguments.setter\n def arguments(self, value):\n self._arguments = value or {}\n\n if not is_tf_keras:\n keras.engine.base_layer.Node = Node\n else:\n base_layer.Node = Node\n\n\nclass GlobalAveragePooling1D(keras.layers.GlobalAveragePooling1D):\n \"\"\"重新定义GlobalAveragePooling1D,支持序列长度为None\n \"\"\"\n def call(self, inputs, mask=None):\n axis = 1 if self.data_format == 'channels_last' else 2\n if mask is not None:\n mask = K.cast(mask, K.floatx())\n mask = mask[..., None] if axis == 1 else mask[:, None]\n return K.sum(inputs * mask, axis=axis) / K.sum(mask, axis=axis)\n else:\n return K.mean(inputs, axis=axis)\n\n\nclass GlobalMaxPooling1D(keras.layers.GlobalMaxPooling1D):\n \"\"\"重新定义GlobalMaxPooling1D,支持mask\n \"\"\"\n def __init__(self, 
data_format='channels_last', **kwargs):\n super(GlobalMaxPooling1D, self).__init__(data_format, **kwargs)\n self.supports_masking = True\n\n def call(self, inputs, mask=None):\n axis = 1 if self.data_format == 'channels_last' else 2\n inputs = sequence_masking(inputs, mask, '-inf', axis)\n return K.max(inputs, axis=axis)\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n\n# 直接覆盖原对象\nkeras.layers.GlobalAveragePooling1D = GlobalAveragePooling1D\nkeras.layers.GlobalMaxPooling1D = GlobalMaxPooling1D\n\n\nclass Embedding(keras.layers.Embedding):\n \"\"\"拓展Embedding层\n \"\"\"\n def compute_mask(self, inputs, mask=None):\n \"\"\"为了适配T5,保证第一个token不被mask\n \"\"\"\n if K.ndim(inputs) == 2:\n mask = super(Embedding, self).compute_mask(inputs, mask)\n if mask is not None:\n mask1 = K.ones_like(mask[:, :1], dtype='bool')\n mask2 = mask[:, 1:]\n return K.concatenate([mask1, mask2], 1)\n else:\n return mask\n\n def call(self, inputs, mode='embedding'):\n \"\"\"新增mode参数,可以为embedding或dense。如果为embedding,\n 则等价于普通Embedding层;如果为dense,则等价于无bias的Dense层。\n \"\"\"\n if mode == 'embedding':\n return super(Embedding, self).call(inputs)\n else:\n kernel = K.transpose(self.embeddings)\n return K.dot(inputs, kernel)\n\n def compute_output_shape(self, input_shape):\n \"\"\"关于判据,本来是通过缓存call时的mode参数来判断的,但是后来发现\n Keras在使用compute_output_shape的时候不一定配套调用了call函数,\n 所以缓存的mode可能是不准的,因此只能出此下策。\n \"\"\"\n if len(input_shape) == 2:\n return super(Embedding, self).compute_output_shape(input_shape)\n else:\n return input_shape[:2] + (K.int_shape(self.embeddings)[0],)\n\n\nclass BiasAdd(Layer):\n \"\"\"加上偏置项\n \"\"\"\n @integerize_shape\n def build(self, input_shape):\n super(BiasAdd, self).build(input_shape)\n output_dim = input_shape[-1]\n self.bias = self.add_weight(\n name='bias', shape=(output_dim,), initializer='zeros'\n )\n\n def call(self, inputs):\n return K.bias_add(inputs, self.bias)\n\n\nclass Concatenate1D(Layer):\n \"\"\"1维序列拼接层\n 说明:本来该功能可以直接通过Concatenate层来实现,无奈Keras\n 自带的Concatenate层的compute_mask写得不合理,导致一个\n 带mask的序列与一个不带mask的序列拼接会报错,因此干脆\n 自己重写一个好了。\n \"\"\"\n def call(self, inputs):\n return K.concatenate(inputs, axis=1)\n\n def compute_mask(self, inputs, mask=None):\n if mask is not None:\n masks = []\n for i, m in enumerate(mask):\n if m is None:\n m = K.ones_like(inputs[i][..., 0], dtype='bool')\n masks.append(m)\n return K.concatenate(masks, axis=1)\n\n def compute_output_shape(self, input_shape):\n if all([shape[1] for shape in input_shape]):\n seq_len = sum([shape[1] for shape in input_shape])\n return (input_shape[0][0], seq_len, input_shape[0][2])\n else:\n return (input_shape[0][0], None, input_shape[0][2])\n\n\nclass MultiHeadAttention(Layer):\n \"\"\"多头注意力机制\n \"\"\"\n def __init__(\n self,\n heads,\n head_size,\n out_dim=None,\n key_size=None,\n use_bias=True,\n attention_scale=True,\n attention_dropout=None,\n return_attention_scores=False,\n kernel_initializer='glorot_uniform',\n **kwargs\n ):\n super(MultiHeadAttention, self).__init__(**kwargs)\n self.heads = heads\n self.head_size = head_size\n self.out_dim = out_dim or heads * head_size\n self.key_size = key_size or head_size\n self.use_bias = use_bias\n self.attention_scale = attention_scale\n self.attention_dropout = attention_dropout\n self.return_attention_scores = return_attention_scores\n self.kernel_initializer = initializers.get(kernel_initializer)\n\n def build(self, input_shape):\n super(MultiHeadAttention, self).build(input_shape)\n self.q_dense = Dense(\n units=self.key_size * self.heads,\n use_bias=self.use_bias,\n 
kernel_initializer=self.kernel_initializer\n )\n self.k_dense = Dense(\n units=self.key_size * self.heads,\n use_bias=self.use_bias,\n kernel_initializer=self.kernel_initializer\n )\n self.v_dense = Dense(\n units=self.head_size * self.heads,\n use_bias=self.use_bias,\n kernel_initializer=self.kernel_initializer\n )\n self.o_dense = Dense(\n units=self.out_dim,\n use_bias=self.use_bias,\n kernel_initializer=self.kernel_initializer\n )\n\n @recompute_grad\n def call(self, inputs, mask=None, **kwargs):\n \"\"\"实现多头注意力\n q_mask: 对输入的query序列的mask。\n 主要是将输出结果的padding部分置0。\n v_mask: 对输入的value序列的mask。\n 主要是防止attention读取到padding信息。\n \"\"\"\n q, k, v = inputs[:3]\n q_mask, v_mask = None, None\n if mask is not None:\n q_mask, v_mask = mask[0], mask[2]\n # 线性变换\n qw = self.q_dense(q)\n kw = self.k_dense(k)\n vw = self.v_dense(v)\n # 形状变换\n qw = K.reshape(qw, (-1, K.shape(q)[1], self.heads, self.key_size))\n kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size))\n vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size))\n # Attention\n qkv_inputs = [qw, kw, vw] + inputs[3:]\n qv_masks = [q_mask, v_mask]\n o, a = self.pay_attention_to(qkv_inputs, qv_masks, **kwargs)\n # 完成输出\n o = K.reshape(o, (-1, K.shape(o)[1], self.head_size * self.heads))\n o = self.o_dense(o)\n # 返回结果\n if self.return_attention_scores:\n return [o, a]\n else:\n return o\n\n def pay_attention_to(self, inputs, mask=None, **kwargs):\n \"\"\"实现标准的乘性多头注意力\n a_bias: 对attention矩阵的bias。\n 不同的attention bias对应不同的应用。\n p_bias: 在attention里的位置偏置。\n 一般用来指定相对位置编码的种类。\n 说明: 这里单独分离出pay_attention_to函数,是为了方便\n 继承此类来定义不同形式的atttention;此处要求\n 返回o.shape=(batch_size, seq_len, heads, head_size)。\n \"\"\"\n (qw, kw, vw), n = inputs[:3], 3\n q_mask, v_mask = mask\n a_bias, p_bias = kwargs.get('a_bias'), kwargs.get('p_bias')\n if a_bias:\n a_bias = inputs[n]\n n += 1\n if p_bias == 'rotary':\n cos_pos = K.repeat_elements(inputs[n][..., None, 1::2], 2, -1)\n sin_pos = K.repeat_elements(inputs[n][..., None, ::2], 2, -1)\n qw2 = K.stack([-qw[..., 1::2], qw[..., ::2]], 4)\n qw2 = K.reshape(qw2, K.shape(qw))\n qw = qw * cos_pos + qw2 * sin_pos\n kw2 = K.stack([-kw[..., 1::2], kw[..., ::2]], 4)\n kw2 = K.reshape(kw2, K.shape(kw))\n kw = kw * cos_pos + kw2 * sin_pos\n # Attention\n a = tf.einsum('bjhd,bkhd->bhjk', qw, kw)\n # 处理位置编码\n if p_bias == 'typical_relative':\n position_bias = inputs[n]\n a = a + tf.einsum('bjhd,jkd->bhjk', qw, position_bias)\n elif p_bias == 't5_relative':\n position_bias = K.permute_dimensions(inputs[n], (2, 0, 1))\n a = a + K.expand_dims(position_bias, 0)\n # Attention(续)\n if self.attention_scale:\n a = a / self.key_size**0.5\n if a_bias is not None:\n a = a + a_bias\n a = sequence_masking(a, v_mask, '-inf', -1)\n A = K.softmax(a)\n if self.attention_dropout:\n A = Dropout(self.attention_dropout)(A)\n # 完成输出\n o = tf.einsum('bhjk,bkhd->bjhd', A, vw)\n if p_bias == 'typical_relative':\n o = o + tf.einsum('bhjk,jkd->bjhd', A, position_bias)\n return o, a\n\n def compute_output_shape(self, input_shape):\n o_shape = (input_shape[0][0], input_shape[0][1], self.out_dim)\n if self.return_attention_scores:\n a_shape = (\n input_shape[0][0], self.heads, input_shape[0][1],\n input_shape[1][1]\n )\n return [o_shape, a_shape]\n else:\n return o_shape\n\n def compute_mask(self, inputs, mask=None):\n if mask is not None:\n if self.return_attention_scores:\n return [mask[0], None]\n else:\n return mask[0]\n\n def get_config(self):\n config = {\n 'heads': self.heads,\n 'head_size': self.head_size,\n 'out_dim': self.out_dim,\n 
'key_size': self.key_size,\n 'use_bias': self.use_bias,\n 'attention_scale': self.attention_scale,\n 'attention_dropout': self.attention_dropout,\n 'return_attention_scores': self.return_attention_scores,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n }\n base_config = super(MultiHeadAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass LayerNormalization(Layer):\n \"\"\"(Conditional) Layer Normalization\n hidden_*系列参数仅为有条件输入时(conditional=True)使用\n \"\"\"\n def __init__(\n self,\n center=True,\n scale=True,\n epsilon=None,\n conditional=False,\n hidden_units=None,\n hidden_activation='linear',\n hidden_initializer='glorot_uniform',\n **kwargs\n ):\n super(LayerNormalization, self).__init__(**kwargs)\n self.center = center\n self.scale = scale\n self.conditional = conditional\n self.hidden_units = hidden_units\n self.hidden_activation = activations.get(hidden_activation)\n self.hidden_initializer = initializers.get(hidden_initializer)\n self.epsilon = epsilon or 1e-12\n\n def compute_mask(self, inputs, mask=None):\n if self.conditional:\n masks = mask if mask is not None else []\n masks = [m[None] for m in masks if m is not None]\n if len(masks) == 0:\n return None\n else:\n return K.all(K.concatenate(masks, axis=0), axis=0)\n else:\n return mask\n\n def build(self, input_shape):\n super(LayerNormalization, self).build(input_shape)\n\n if self.conditional:\n shape = (input_shape[0][-1],)\n else:\n shape = (input_shape[-1],)\n\n if self.center:\n self.beta = self.add_weight(\n shape=shape, initializer='zeros', name='beta'\n )\n if self.scale:\n self.gamma = self.add_weight(\n shape=shape, initializer='ones', name='gamma'\n )\n\n if self.conditional:\n\n if self.hidden_units is not None:\n self.hidden_dense = Dense(\n units=self.hidden_units,\n activation=self.hidden_activation,\n use_bias=False,\n kernel_initializer=self.hidden_initializer\n )\n\n if self.center:\n self.beta_dense = Dense(\n units=shape[0], use_bias=False, kernel_initializer='zeros'\n )\n if self.scale:\n self.gamma_dense = Dense(\n units=shape[0], use_bias=False, kernel_initializer='zeros'\n )\n\n @recompute_grad\n def call(self, inputs):\n \"\"\"如果是条件Layer Norm,则默认以list为输入,第二个是condition\n \"\"\"\n if self.conditional:\n inputs, cond = inputs\n if self.hidden_units is not None:\n cond = self.hidden_dense(cond)\n for _ in range(K.ndim(inputs) - K.ndim(cond)):\n cond = K.expand_dims(cond, 1)\n if self.center:\n beta = self.beta_dense(cond) + self.beta\n if self.scale:\n gamma = self.gamma_dense(cond) + self.gamma\n else:\n if self.center:\n beta = self.beta\n if self.scale:\n gamma = self.gamma\n\n outputs = inputs\n if self.center:\n mean = K.mean(outputs, axis=-1, keepdims=True)\n outputs = outputs - mean\n if self.scale:\n variance = K.mean(K.square(outputs), axis=-1, keepdims=True)\n std = K.sqrt(variance + self.epsilon)\n outputs = outputs / std * gamma\n if self.center:\n outputs = outputs + beta\n\n return outputs\n\n def compute_output_shape(self, input_shape):\n if self.conditional:\n return input_shape[0]\n else:\n return input_shape\n\n def get_config(self):\n config = {\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'conditional': self.conditional,\n 'hidden_units': self.hidden_units,\n 'hidden_activation': activations.serialize(self.hidden_activation),\n 'hidden_initializer':\n initializers.serialize(self.hidden_initializer),\n }\n base_config = super(LayerNormalization, self).get_config()\n return 
dict(list(base_config.items()) + list(config.items()))\n\n\nclass PositionEmbedding(Layer):\n \"\"\"定义可训练的位置Embedding\n \"\"\"\n def __init__(\n self,\n input_dim,\n output_dim,\n merge_mode='add',\n hierarchical=None,\n embeddings_initializer='zeros',\n custom_position_ids=False,\n **kwargs\n ):\n super(PositionEmbedding, self).__init__(**kwargs)\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.merge_mode = merge_mode\n self.hierarchical = hierarchical\n self.embeddings_initializer = initializers.get(embeddings_initializer)\n self.custom_position_ids = custom_position_ids\n\n def build(self, input_shape):\n super(PositionEmbedding, self).build(input_shape)\n self.embeddings = self.add_weight(\n name='embeddings',\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer\n )\n\n def call(self, inputs):\n \"\"\"如果custom_position_ids,那么第二个输入为自定义的位置id\n \"\"\"\n if self.custom_position_ids:\n inputs, position_ids = inputs\n if 'int' not in K.dtype(position_ids):\n position_ids = K.cast(position_ids, 'int32')\n else:\n input_shape = K.shape(inputs)\n batch_size, seq_len = input_shape[0], input_shape[1]\n position_ids = K.arange(0, seq_len, dtype='int32')[None]\n\n if self.hierarchical:\n alpha = 0.4 if self.hierarchical is True else self.hierarchical\n embeddings = self.embeddings - alpha * self.embeddings[:1]\n embeddings = embeddings / (1 - alpha)\n embeddings_x = K.gather(embeddings, position_ids // self.input_dim)\n embeddings_y = K.gather(embeddings, position_ids % self.input_dim)\n embeddings = alpha * embeddings_x + (1 - alpha) * embeddings_y\n else:\n if self.custom_position_ids:\n embeddings = K.gather(self.embeddings, position_ids)\n else:\n embeddings = self.embeddings[None, :seq_len]\n\n if self.merge_mode == 'add':\n return inputs + embeddings\n elif self.merge_mode == 'mul':\n return inputs * (embeddings + 1.0)\n elif self.merge_mode == 'zero':\n return embeddings\n else:\n if not self.custom_position_ids:\n embeddings = K.tile(embeddings, [batch_size, 1, 1])\n return K.concatenate([inputs, embeddings])\n\n def compute_output_shape(self, input_shape):\n if self.custom_position_ids:\n input_shape = input_shape[0]\n\n if self.merge_mode in ['add', 'mul', 'zero']:\n return input_shape[:2] + (self.output_dim,)\n else:\n return input_shape[:2] + (input_shape[2] + self.output_dim,)\n\n def get_config(self):\n config = {\n 'input_dim': self.input_dim,\n 'output_dim': self.output_dim,\n 'merge_mode': self.merge_mode,\n 'hierarchical': self.hierarchical,\n 'embeddings_initializer':\n initializers.serialize(self.embeddings_initializer),\n 'custom_position_ids': self.custom_position_ids,\n }\n base_config = super(PositionEmbedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SinusoidalPositionEmbedding(Layer):\n \"\"\"定义Sin-Cos位置Embedding\n \"\"\"\n def __init__(\n self,\n output_dim,\n merge_mode='add',\n custom_position_ids=False,\n **kwargs\n ):\n super(SinusoidalPositionEmbedding, self).__init__(**kwargs)\n self.output_dim = output_dim\n self.merge_mode = merge_mode\n self.custom_position_ids = custom_position_ids\n\n def call(self, inputs):\n \"\"\"如果custom_position_ids,那么第二个输入为自定义的位置id\n \"\"\"\n if self.custom_position_ids:\n seq_len = K.shape(inputs)[1]\n inputs, position_ids = inputs\n if 'float' not in K.dtype(position_ids):\n position_ids = K.cast(position_ids, K.floatx())\n else:\n input_shape = K.shape(inputs)\n batch_size, seq_len = input_shape[0], input_shape[1]\n position_ids = 
K.arange(0, seq_len, dtype=K.floatx())[None]\n\n indices = K.arange(0, self.output_dim // 2, dtype=K.floatx())\n indices = K.pow(10000.0, -2 * indices / self.output_dim)\n embeddings = tf.einsum('bn,d->bnd', position_ids, indices)\n embeddings = K.stack([K.sin(embeddings), K.cos(embeddings)], axis=-1)\n embeddings = K.reshape(embeddings, (-1, seq_len, self.output_dim))\n\n if self.merge_mode == 'add':\n return inputs + embeddings\n elif self.merge_mode == 'mul':\n return inputs * (embeddings + 1.0)\n elif self.merge_mode == 'zero':\n return embeddings\n else:\n if not self.custom_position_ids:\n embeddings = K.tile(embeddings, [batch_size, 1, 1])\n return K.concatenate([inputs, embeddings])\n\n def compute_output_shape(self, input_shape):\n if self.custom_position_ids:\n input_shape = input_shape[0]\n\n if self.merge_mode in ['add', 'mul', 'zero']:\n return input_shape[:2] + (self.output_dim,)\n else:\n return input_shape[:2] + (input_shape[2] + self.output_dim,)\n\n def get_config(self):\n config = {\n 'output_dim': self.output_dim,\n 'merge_mode': self.merge_mode,\n 'custom_position_ids': self.custom_position_ids,\n }\n base_config = super(SinusoidalPositionEmbedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RelativePositionEmbedding(Layer):\n \"\"\"相对位置编码\n 来自论文:https://arxiv.org/abs/1803.02155\n \"\"\"\n def __init__(\n self, input_dim, output_dim, embeddings_initializer='zeros', **kwargs\n ):\n super(RelativePositionEmbedding, self).__init__(**kwargs)\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.embeddings_initializer = initializers.get(embeddings_initializer)\n\n def build(self, input_shape):\n super(RelativePositionEmbedding, self).build(input_shape)\n self.embeddings = self.add_weight(\n name='embeddings',\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer,\n )\n\n def call(self, inputs):\n pos_ids = self.compute_position_ids(inputs)\n return K.gather(self.embeddings, pos_ids)\n\n def compute_position_ids(self, inputs):\n q, v = inputs\n # 计算位置差\n q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')\n q_idxs = K.expand_dims(q_idxs, 1)\n v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')\n v_idxs = K.expand_dims(v_idxs, 0)\n pos_ids = v_idxs - q_idxs\n # 后处理操作\n max_position = (self.input_dim - 1) // 2\n pos_ids = K.clip(pos_ids, -max_position, max_position)\n pos_ids = pos_ids + max_position\n return pos_ids\n\n def compute_output_shape(self, input_shape):\n return (None, None, self.output_dim)\n\n def compute_mask(self, inputs, mask):\n return mask[0]\n\n def get_config(self):\n config = {\n 'input_dim': self.input_dim,\n 'output_dim': self.output_dim,\n 'embeddings_initializer':\n initializers.serialize(self.embeddings_initializer),\n }\n base_config = super(RelativePositionEmbedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RelativePositionEmbeddingT5(RelativePositionEmbedding):\n \"\"\"Google T5的相对位置编码\n 来自论文:https://arxiv.org/abs/1910.10683\n \"\"\"\n def __init__(\n self,\n input_dim,\n output_dim,\n max_distance=128,\n bidirectional=True,\n embeddings_initializer='zeros',\n **kwargs\n ):\n super(RelativePositionEmbeddingT5,\n self).__init__(input_dim, output_dim, **kwargs)\n self.max_distance = max_distance\n self.bidirectional = bidirectional\n\n def compute_position_ids(self, inputs):\n \"\"\"T5的相对位置分桶(直接翻译自官方T5源码)\n \"\"\"\n q, v = inputs\n # 计算位置差\n q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')\n q_idxs = 
K.expand_dims(q_idxs, 1)\n v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')\n v_idxs = K.expand_dims(v_idxs, 0)\n pos_ids = v_idxs - q_idxs\n # 后处理操作\n num_buckets, max_distance = self.input_dim, self.max_distance\n ret = 0\n n = -pos_ids\n if self.bidirectional:\n num_buckets //= 2\n ret += K.cast(K.less(n, 0), 'int32') * num_buckets\n n = K.abs(n)\n else:\n n = K.maximum(n, 0)\n # now n is in the range [0, inf)\n max_exact = num_buckets // 2\n is_small = K.less(n, max_exact)\n val_if_large = max_exact + K.cast(\n K.log(K.cast(n, K.floatx()) / max_exact) /\n np.log(max_distance / max_exact) * (num_buckets - max_exact),\n 'int32',\n )\n val_if_large = K.minimum(val_if_large, num_buckets - 1)\n ret += K.switch(is_small, n, val_if_large)\n return ret\n\n def get_config(self):\n config = {\n 'max_distance': self.max_distance,\n 'bidirectional': self.bidirectional,\n }\n base_config = super(RelativePositionEmbeddingT5, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass FeedForward(Layer):\n \"\"\"FeedForward层\n 如果activation不是一个list,那么它就是两个Dense层的叠加;如果activation是\n 一个list,那么第一个Dense层将会被替换成门控线性单元(Gated Linear Unit)。\n 参考论文: https://arxiv.org/abs/2002.05202\n \"\"\"\n def __init__(\n self,\n units,\n activation='relu',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n **kwargs\n ):\n super(FeedForward, self).__init__(**kwargs)\n self.units = units\n if not isinstance(activation, list):\n activation = [activation]\n self.activation = [activations.get(act) for act in activation]\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n\n @integerize_shape\n def build(self, input_shape):\n super(FeedForward, self).build(input_shape)\n output_dim = input_shape[-1]\n\n for i, activation in enumerate(self.activation):\n i_dense = Dense(\n units=self.units,\n activation=activation,\n use_bias=self.use_bias,\n kernel_initializer=self.kernel_initializer\n )\n setattr(self, 'i%s_dense' % i, i_dense)\n\n self.o_dense = Dense(\n units=output_dim,\n use_bias=self.use_bias,\n kernel_initializer=self.kernel_initializer\n )\n\n @recompute_grad\n def call(self, inputs):\n x = self.i0_dense(inputs)\n for i in range(1, len(self.activation)):\n x = x * getattr(self, 'i%s_dense' % i)(inputs)\n x = self.o_dense(x)\n return x\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': [\n activations.serialize(act) for act in self.activation\n ],\n 'use_bias': self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n }\n base_config = super(FeedForward, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ConditionalRandomField(Layer):\n \"\"\"纯Keras实现CRF层\n CRF层本质上是一个带训练参数的loss计算层。\n \"\"\"\n def __init__(self, lr_multiplier=1, **kwargs):\n super(ConditionalRandomField, self).__init__(**kwargs)\n self.lr_multiplier = lr_multiplier # 当前层学习率的放大倍数\n\n @integerize_shape\n def build(self, input_shape):\n super(ConditionalRandomField, self).build(input_shape)\n output_dim = input_shape[-1]\n self._trans = self.add_weight(\n name='trans',\n shape=(output_dim, output_dim),\n initializer='glorot_uniform'\n )\n if self.lr_multiplier != 1:\n K.set_value(self._trans, K.eval(self._trans) / self.lr_multiplier)\n\n @property\n def trans(self):\n if self.lr_multiplier != 1:\n return self.lr_multiplier * self._trans\n else:\n return self._trans\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n def call(self, inputs, mask=None):\n return 
sequence_masking(inputs, mask, '-inf', 1)\n\n def target_score(self, y_true, y_pred):\n \"\"\"计算目标路径的相对概率(还没有归一化)\n 要点:逐标签得分,加上转移概率得分。\n \"\"\"\n point_score = tf.einsum('bni,bni->b', y_true, y_pred) # 逐标签得分\n trans_score = tf.einsum(\n 'bni,ij,bnj->b', y_true[:, :-1], self.trans, y_true[:, 1:]\n ) # 标签转移得分\n return point_score + trans_score\n\n def log_norm_step(self, inputs, states):\n \"\"\"递归计算归一化因子\n 要点:1、递归计算;2、用logsumexp避免溢出。\n \"\"\"\n inputs, mask = inputs[:, :-1], inputs[:, -1:]\n states = K.expand_dims(states[0], 2) # (batch_size, output_dim, 1)\n trans = K.expand_dims(self.trans, 0) # (1, output_dim, output_dim)\n outputs = tf.reduce_logsumexp(\n states + trans, 1\n ) # (batch_size, output_dim)\n outputs = outputs + inputs\n outputs = mask * outputs + (1 - mask) * states[:, :, 0]\n return outputs, [outputs]\n\n def dense_loss(self, y_true, y_pred):\n \"\"\"y_true需要是one hot形式\n \"\"\"\n # 导出mask并转换数据类型\n mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)\n mask = K.cast(mask, K.floatx())\n # 计算目标分数\n y_true, y_pred = y_true * mask, y_pred * mask\n target_score = self.target_score(y_true, y_pred)\n # 递归计算log Z\n init_states = [y_pred[:, 0]]\n y_pred = K.concatenate([y_pred, mask], axis=2)\n input_length = K.int_shape(y_pred[:, 1:])[1]\n log_norm, _, _ = K.rnn(\n self.log_norm_step,\n y_pred[:, 1:],\n init_states,\n input_length=input_length\n ) # 最后一步的log Z向量\n log_norm = tf.reduce_logsumexp(log_norm, 1) # logsumexp得标量\n # 计算损失 -log p\n return log_norm - target_score\n\n def sparse_loss(self, y_true, y_pred):\n \"\"\"y_true需要是整数形式(非one hot)\n \"\"\"\n # y_true需要重新明确一下shape和dtype\n y_true = K.reshape(y_true, K.shape(y_pred)[:-1])\n y_true = K.cast(y_true, 'int32')\n # 转为one hot\n y_true = K.one_hot(y_true, K.shape(self.trans)[0])\n return self.dense_loss(y_true, y_pred)\n\n def dense_accuracy(self, y_true, y_pred):\n \"\"\"训练过程中显示逐帧准确率的函数,排除了mask的影响\n 此处y_true需要是one hot形式\n \"\"\"\n y_true = K.argmax(y_true, 2)\n return self.sparse_accuracy(y_true, y_pred)\n\n def sparse_accuracy(self, y_true, y_pred):\n \"\"\"训练过程中显示逐帧准确率的函数,排除了mask的影响\n 此处y_true需要是整数形式(非one hot)\n \"\"\"\n # 导出mask并转换数据类型\n mask = K.all(K.greater(y_pred, -1e6), axis=2)\n mask = K.cast(mask, K.floatx())\n # y_true需要重新明确一下shape和dtype\n y_true = K.reshape(y_true, K.shape(y_pred)[:-1])\n y_true = K.cast(y_true, 'int32')\n # 逐标签取最大来粗略评测训练效果\n y_pred = K.cast(K.argmax(y_pred, 2), 'int32')\n isequal = K.cast(K.equal(y_true, y_pred), K.floatx())\n return K.sum(isequal * mask) / K.sum(mask)\n\n def get_config(self):\n config = {\n 'lr_multiplier': self.lr_multiplier,\n }\n base_config = super(ConditionalRandomField, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MaximumEntropyMarkovModel(Layer):\n \"\"\"(双向)最大熵隐马尔可夫模型\n 作用和用法都类似CRF,但是比CRF更快更简单。\n \"\"\"\n def __init__(self, lr_multiplier=1, hidden_dim=None, **kwargs):\n super(MaximumEntropyMarkovModel, self).__init__(**kwargs)\n self.lr_multiplier = lr_multiplier # 当前层学习率的放大倍数\n self.hidden_dim = hidden_dim # 如果非None,则将转移矩阵低秩分解\n\n @integerize_shape\n def build(self, input_shape):\n super(MaximumEntropyMarkovModel, self).build(input_shape)\n output_dim = input_shape[-1]\n\n if self.hidden_dim is None:\n self._trans = self.add_weight(\n name='trans',\n shape=(output_dim, output_dim),\n initializer='glorot_uniform'\n )\n if self.lr_multiplier != 1:\n K.set_value(\n self._trans,\n K.eval(self._trans) / self.lr_multiplier\n )\n else:\n self._l_trans = self.add_weight(\n name='l_trans',\n shape=(output_dim, 
self.hidden_dim),\n initializer='glorot_uniform'\n )\n self._r_trans = self.add_weight(\n name='r_trans',\n shape=(output_dim, self.hidden_dim),\n initializer='glorot_uniform'\n )\n\n if self.lr_multiplier != 1:\n K.set_value(\n self._l_trans,\n K.eval(self._l_trans) / self.lr_multiplier\n )\n K.set_value(\n self._r_trans,\n K.eval(self._r_trans) / self.lr_multiplier\n )\n\n @property\n def trans(self):\n if self.lr_multiplier != 1:\n return self.lr_multiplier * self._trans\n else:\n return self._trans\n\n @property\n def l_trans(self):\n if self.lr_multiplier != 1:\n return self.lr_multiplier * self._l_trans\n else:\n return self._l_trans\n\n @property\n def r_trans(self):\n if self.lr_multiplier != 1:\n return self.lr_multiplier * self._r_trans\n else:\n return self._r_trans\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n def call(self, inputs, mask=None):\n return sequence_masking(inputs, mask, '-inf', 1)\n\n def reverse_sequence(self, inputs, mask=None):\n if mask is None:\n return [x[:, ::-1] for x in inputs]\n else:\n length = K.cast(K.sum(mask, 1), 'int32')\n return [tf.reverse_sequence(x, length, seq_axis=1) for x in inputs]\n\n def basic_loss(self, y_true, y_pred, go_backwards=False):\n \"\"\"y_true需要是整数形式(非one hot)\n \"\"\"\n # 导出mask并转换数据类型\n mask = K.all(K.greater(y_pred, -1e6), axis=2)\n mask = K.cast(mask, K.floatx())\n # y_true需要重新明确一下shape和dtype\n y_true = K.reshape(y_true, K.shape(y_pred)[:-1])\n y_true = K.cast(y_true, 'int32')\n # 反转相关\n if self.hidden_dim is None:\n if go_backwards: # 是否反转序列\n y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)\n trans = K.transpose(self.trans)\n else:\n trans = self.trans\n histoty = K.gather(trans, y_true)\n else:\n if go_backwards: # 是否反转序列\n y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)\n r_trans, l_trans = self.l_trans, self.r_trans\n else:\n l_trans, r_trans = self.l_trans, self.r_trans\n histoty = K.gather(l_trans, y_true)\n histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)\n # 计算loss\n histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)\n y_pred = (y_pred + histoty) / 2\n loss = K.sparse_categorical_crossentropy(\n y_true, y_pred, from_logits=True\n )\n return K.sum(loss * mask) / K.sum(mask)\n\n def sparse_loss(self, y_true, y_pred):\n \"\"\"y_true需要是整数形式(非one hot)\n \"\"\"\n loss = self.basic_loss(y_true, y_pred, False)\n loss = loss + self.basic_loss(y_true, y_pred, True)\n return loss / 2\n\n def dense_loss(self, y_true, y_pred):\n \"\"\"y_true需要是one hot形式\n \"\"\"\n y_true = K.argmax(y_true, 2)\n return self.sparse_loss(y_true, y_pred)\n\n def basic_accuracy(self, y_true, y_pred, go_backwards=False):\n \"\"\"训练过程中显示逐帧准确率的函数,排除了mask的影响\n 此处y_true需要是整数形式(非one hot)\n \"\"\"\n # 导出mask并转换数据类型\n mask = K.all(K.greater(y_pred, -1e6), axis=2)\n mask = K.cast(mask, K.floatx())\n # y_true需要重新明确一下shape和dtype\n y_true = K.reshape(y_true, K.shape(y_pred)[:-1])\n y_true = K.cast(y_true, 'int32')\n # 反转相关\n if self.hidden_dim is None:\n if go_backwards: # 是否反转序列\n y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)\n trans = K.transpose(self.trans)\n else:\n trans = self.trans\n histoty = K.gather(trans, y_true)\n else:\n if go_backwards: # 是否反转序列\n y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)\n r_trans, l_trans = self.l_trans, self.r_trans\n else:\n l_trans, r_trans = self.l_trans, self.r_trans\n histoty = K.gather(l_trans, y_true)\n histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)\n # 计算逐标签accuracy\n histoty = K.concatenate([y_pred[:, :1], 
histoty[:, :-1]], 1)\n y_pred = (y_pred + histoty) / 2\n y_pred = K.cast(K.argmax(y_pred, 2), 'int32')\n isequal = K.cast(K.equal(y_true, y_pred), K.floatx())\n return K.sum(isequal * mask) / K.sum(mask)\n\n def sparse_accuracy(self, y_true, y_pred):\n \"\"\"训练过程中显示逐帧准确率的函数,排除了mask的影响\n 此处y_true需要是整数形式(非one hot)\n \"\"\"\n accuracy = self.basic_accuracy(y_true, y_pred, False)\n accuracy = accuracy + self.basic_accuracy(y_true, y_pred, True)\n return accuracy / 2\n\n def dense_accuracy(self, y_true, y_pred):\n \"\"\"训练过程中显示逐帧准确率的函数,排除了mask的影响\n 此处y_true需要是one hot形式\n \"\"\"\n y_true = K.argmax(y_true, 2)\n return self.sparse_accuracy(y_true, y_pred)\n\n def get_config(self):\n config = {\n 'lr_multiplier': self.lr_multiplier,\n 'hidden_dim': self.hidden_dim,\n }\n base_config = super(MaximumEntropyMarkovModel, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass GlobalPointer(Layer):\n \"\"\"全局指针模块\n 将序列的每个(start, end)作为整体来进行判断\n \"\"\"\n def __init__(self, heads, head_size, RoPE=True, **kwargs):\n super(GlobalPointer, self).__init__(**kwargs)\n self.heads = heads\n self.head_size = head_size\n self.RoPE = RoPE\n\n def build(self, input_shape):\n super(GlobalPointer, self).build(input_shape)\n self.dense = Dense(self.head_size * self.heads * 2)\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n @recompute_grad\n def call(self, inputs, mask=None):\n # 输入变换\n inputs = self.dense(inputs)\n inputs = tf.split(inputs, self.heads, axis=-1)\n inputs = K.stack(inputs, axis=-2)\n qw, kw = inputs[..., :self.head_size], inputs[..., self.head_size:]\n # RoPE编码\n if self.RoPE:\n pos = SinusoidalPositionEmbedding(self.head_size, 'zero')(inputs)\n cos_pos = K.repeat_elements(pos[..., None, 1::2], 2, -1)\n sin_pos = K.repeat_elements(pos[..., None, ::2], 2, -1)\n qw2 = K.stack([-qw[..., 1::2], qw[..., ::2]], 4)\n qw2 = K.reshape(qw2, K.shape(qw))\n qw = qw * cos_pos + qw2 * sin_pos\n kw2 = K.stack([-kw[..., 1::2], kw[..., ::2]], 4)\n kw2 = K.reshape(kw2, K.shape(kw))\n kw = kw * cos_pos + kw2 * sin_pos\n # 计算内积\n logits = tf.einsum('bmhd,bnhd->bhmn', qw, kw)\n # 排除padding\n logits = sequence_masking(logits, mask, '-inf', 2)\n logits = sequence_masking(logits, mask, '-inf', 3)\n # 排除下三角\n mask = tf.linalg.band_part(K.ones_like(logits), 0, -1)\n logits = logits - (1 - mask) * 1e12\n # scale返回\n return logits / self.head_size**0.5\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.heads, input_shape[1], input_shape[1])\n\n def get_config(self):\n config = {\n 'heads': self.heads,\n 'head_size': self.head_size,\n 'RoPE': self.RoPE,\n }\n base_config = super(GlobalPointer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Loss(Layer):\n \"\"\"特殊的层,用来定义复杂loss\n \"\"\"\n def __init__(self, output_axis=None, **kwargs):\n super(Loss, self).__init__(**kwargs)\n self.output_axis = output_axis\n\n def call(self, inputs, mask=None):\n loss = self.compute_loss(inputs, mask)\n self.add_loss(loss, inputs=inputs)\n if self.output_axis is None:\n return inputs\n elif isinstance(self.output_axis, list):\n return [inputs[i] for i in self.output_axis]\n else:\n return inputs[self.output_axis]\n\n def compute_loss(self, inputs, mask=None):\n raise NotImplementedError\n\n def compute_output_shape(self, input_shape):\n if self.output_axis is None:\n return input_shape\n elif isinstance(self.output_axis, list):\n return [input_shape[i] for i in self.output_axis]\n else:\n return 
input_shape[self.output_axis]\n\n def compute_mask(self, inputs, mask):\n if mask is not None:\n if self.output_axis is None:\n return mask\n elif isinstance(self.output_axis, list):\n return [mask[i] for i in self.output_axis]\n else:\n return mask[self.output_axis]\n\n def get_config(self):\n config = {\n 'output_axis': self.output_axis,\n }\n base_config = super(Loss, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ncustom_objects = {\n 'Embedding': Embedding,\n 'BiasAdd': BiasAdd,\n 'Concatenate1D': Concatenate1D,\n 'MultiHeadAttention': MultiHeadAttention,\n 'LayerNormalization': LayerNormalization,\n 'PositionEmbedding': PositionEmbedding,\n 'SinusoidalPositionEmbedding': SinusoidalPositionEmbedding,\n 'RelativePositionEmbedding': RelativePositionEmbedding,\n 'RelativePositionEmbeddingT5': RelativePositionEmbeddingT5,\n 'FeedForward': FeedForward,\n 'ConditionalRandomField': ConditionalRandomField,\n 'MaximumEntropyMarkovModel': MaximumEntropyMarkovModel,\n 'GlobalPointer': GlobalPointer,\n 'Loss': Loss,\n}\n\nkeras.utils.get_custom_objects().update(custom_objects)\n"
] | [
[
"numpy.log",
"tensorflow.reverse_sequence",
"tensorflow.split",
"tensorflow.reduce_logsumexp",
"tensorflow.einsum"
]
] |
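The layer code in the row above rotates query/key vectors with sinusoidal position embeddings (RoPE) before taking pairwise inner products to score (start, end) spans. Below is a minimal NumPy sketch of that rotation trick; the function names, the interleaved sin/cos layout, and the scaling are assumptions for illustration, not the repo's API.

```python
import numpy as np

def apply_rope(x, pos):
    """Rotate feature pairs of x (seq_len, dim) by the angles encoded in pos (seq_len, dim).

    pos holds interleaved position encodings: pos[:, 0::2] = sin, pos[:, 1::2] = cos
    (the convention assumed here for a sinusoidal position embedding).
    """
    cos_pos = np.repeat(pos[:, 1::2], 2, axis=-1)   # duplicate each cos over its feature pair
    sin_pos = np.repeat(pos[:, 0::2], 2, axis=-1)   # duplicate each sin over its feature pair
    x2 = np.stack([-x[:, 1::2], x[:, 0::2]], axis=-1).reshape(x.shape)  # (-x1, x0) per pair
    return x * cos_pos + x2 * sin_pos               # standard 2D rotation applied pairwise

seq_len, dim = 4, 8
theta = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))       # (dim/2,) rotation frequencies
angles = np.arange(seq_len)[:, None] * theta[None, :]       # (seq_len, dim/2)
pos = np.zeros((seq_len, dim))
pos[:, 0::2], pos[:, 1::2] = np.sin(angles), np.cos(angles)

q = np.random.randn(seq_len, dim)
k = np.random.randn(seq_len, dim)
scores = apply_rope(q, pos) @ apply_rope(k, pos).T / np.sqrt(dim)  # position-aware span logits
print(scores.shape)  # (4, 4)
```

Because the rotation is applied to both q and k, the inner product depends only on the relative offset between positions, which is what lets the pointer head score spans without explicit relative-position parameters.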
thduynguyen/sixd_toolkit | [
"972015e860967ddff38ca74dc7d36f929d05724d"
] | [
"conversion/brachmann_convert_gt.py"
] | [
"# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n\n# Note: The last RGB-D image of the Benchvise sequence of the Hinterstoisser's\n# dataset was removed, because Brachmann et al. do not provide the extended\n# ground truth poses for it.\n\nimport os\nimport sys\nimport glob\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom pysixd import inout, misc, transform, renderer\nimport hinter_flip\n\nfrom params.dataset_params import get_dataset_params\npar = get_dataset_params('hinterstoisser')\n\nbase_path = '/local/datasets/tlod/hinterstoisser/'\nrgb_mpath = base_path + 'test/02/rgb/{:04d}.png'\nmodel_mpath = base_path + 'models/obj_{:02d}.ply' # Already transformed\npose_mpath = '/local/datasets/tlod/dresden/occlusion/poses/{}/info_{:05d}.txt'\nscene_gt_path = base_path + 'test/02/scene_gt_brachmann.yml'\n\nobj_names_id_map = {'Ape': 1, 'Can': 5, 'Cat': 6, 'Driller': 8, 'Duck': 9,\n 'Eggbox': 10, 'Glue': 11, 'Holepuncher': 12}\n\ndef load_gt_pose_brachmann(path):\n R = []\n t = []\n rotation_sec = False\n center_sec = False\n with open(path, 'r') as f:\n for line in f.read().splitlines():\n if 'rotation:' in line:\n rotation_sec = True\n elif rotation_sec:\n R += line.split(' ')\n if len(R) == 9:\n rotation_sec = False\n elif 'center:' in line:\n center_sec = True\n elif center_sec:\n t = line.split(' ')\n center_sec = False\n\n assert((len(R) == 0 and len(t) == 0) or\n (len(R) == 9 and len(t) == 3))\n\n if len(R) == 0:\n pose = {'R': np.array([]), 't': np.array([])}\n else:\n pose = {'R': np.array(map(float, R)).reshape((3, 3)),\n 't': np.array(map(float, t)).reshape((3, 1))}\n\n # Flip Y and Z axis (OpenGL -> OpenCV coordinate system)\n yz_flip = np.eye(3, dtype=np.float32)\n yz_flip[0, 0], yz_flip[1, 1], yz_flip[2, 2] = 1, -1, -1\n pose['R'] = yz_flip.dot(pose['R'])\n pose['t'] = yz_flip.dot(pose['t'])\n return pose\n\n# Get list of image IDs\nrgb_fpaths = sorted(glob.glob(os.path.dirname(pose_mpath.format('Ape', 0)) + '/*.txt'))\nim_ids = sorted([int(e.split('info_')[1].split('.txt')[0]) for e in rgb_fpaths])\n\nscene_gt = {}\nfor obj_name in sorted(obj_names_id_map.keys()):\n\n # Load object model\n obj_id = obj_names_id_map[obj_name]\n model = inout.load_ply(model_mpath.format(obj_id))\n\n # Transformation which was applied to the object models (its inverse will\n # be applied to the GT poses):\n # 1) Translate the bounding box center to the origin - Brachmann et al.\n # already translated the bounding box to the center\n # 2) Rotate around Y axis by pi + flip for some objects\n R_model = transform.rotation_matrix(math.pi, [0, 1, 0])[:3, :3]\n\n # Extra rotation around Z axis by pi for some models\n if hinter_flip.obj_flip_z[obj_id]:\n R_z = transform.rotation_matrix(math.pi, [0, 0, 1])[:3, :3]\n R_model = R_z.dot(R_model)\n\n # The ground truth poses of Brachmann et al. 
are related to a different\n # model coordinate system - to get the original Hinterstoisser's orientation\n # of the objects, we need to rotate by pi/2 around X and by pi/2 around Z\n R_z_90 = transform.rotation_matrix(-math.pi * 0.5, [0, 0, 1])[:3, :3]\n R_x_90 = transform.rotation_matrix(-math.pi * 0.5, [1, 0, 0])[:3, :3]\n R_conv = np.linalg.inv(R_model.dot(R_z_90.dot(R_x_90)))\n\n for im_id in im_ids:\n if im_id % 10 == 0:\n print('obj,view: ' + obj_name + ',' + str(im_id))\n\n # Load the GT pose\n pose = load_gt_pose_brachmann(pose_mpath.format(obj_name, im_id))\n if pose['R'].size != 0 and pose['t'].size != 0:\n\n # Transfom the GT pose\n R_m2c = pose['R'].dot(R_conv)\n t_m2c = pose['t'] * 1000 # from [m] to [mm]\n\n # Get 2D bounding box of the object model at the ground truth pose\n obj_bb = misc.calc_pose_2d_bbox(model, par['cam']['im_size'],\n par['cam']['K'], R_m2c, t_m2c)\n\n # Visualization\n if False:\n rgb = inout.load_im(rgb_mpath.format(im_id, im_id))\n ren_rgb = renderer.render(model, par['cam']['im_size'],\n par['cam']['K'], R_m2c, t_m2c, mode='rgb')\n vis_rgb = 0.4 * rgb.astype(np.float32) +\\\n 0.6 * ren_rgb.astype(np.float32)\n vis_rgb = vis_rgb.astype(np.uint8)\n vis_rgb = misc.draw_rect(vis_rgb, obj_bb)\n plt.imshow(vis_rgb)\n plt.show()\n\n scene_gt.setdefault(im_id, []).append(\n {\n 'obj_id': obj_id,\n 'cam_R_m2c': R_m2c.flatten().tolist(),\n 'cam_t_m2c': t_m2c.flatten().tolist(),\n 'obj_bb': [int(x) for x in obj_bb]\n }\n )\n\n def float_representer(dumper, value):\n text = '{0:.8f}'.format(value)\n return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)\n yaml.add_representer(float, float_representer)\n\n # Store ground truth poses\n with open(scene_gt_path, 'w') as f:\n yaml.dump(scene_gt, f, width=10000)\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.imshow",
"numpy.eye"
]
] |
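The conversion script in the row above flips the Y and Z axes of each ground-truth pose (OpenGL to OpenCV camera convention) and then right-multiplies by a fixed model-frame rotation before converting metres to millimetres. A standalone NumPy sketch of that transform chain follows; the example pose and the choice of model-frame rotation are illustrative, not values from the dataset.

```python
import numpy as np

def opengl_to_opencv(R, t):
    """Flip Y and Z of a camera-frame pose (OpenGL convention -> OpenCV convention)."""
    yz_flip = np.diag([1.0, -1.0, -1.0])
    return yz_flip @ R, yz_flip @ t

def rot_x(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])

def rot_z(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

# A hypothetical pose as it might be read from an info_XXXXX.txt file (rotation + center in metres).
R_gl = np.eye(3)
t_gl = np.array([[0.1], [0.0], [1.0]])

R_cv, t_cv = opengl_to_opencv(R_gl, t_gl)

# Undo a model-frame change of basis (-90 deg about Z, then -90 deg about X, as in the script).
R_conv = np.linalg.inv(rot_z(-np.pi / 2) @ rot_x(-np.pi / 2))
R_m2c = R_cv @ R_conv          # model-to-camera rotation in the target convention
t_m2c = t_cv * 1000.0          # metres -> millimetres
print(R_m2c.shape, t_m2c.ravel())
```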
i-need-sleep/REDQ | [
"d237f58075773d576482c34d39751cccbbb59f86"
] | [
"train_redq_sac_exp1-2-2.py"
] | [
"import gym\r\nimport numpy as np\r\nimport torch\r\nimport time\r\nimport sys\r\nfrom redq_modified.algos.redq_sac import REDQSACAgent\r\nfrom redq_modified.algos.core import mbpo_epoches, test_agent\r\nfrom redq_modified.utils.run_utils import setup_logger_kwargs\r\nfrom redq_modified.utils.bias_utils import log_bias_evaluation\r\nfrom redq_modified.utils.logx import EpochLogger\r\n\r\ndef redq_sac(env_name, seed=0, epochs='mbpo', steps_per_epoch=1000,\r\n max_ep_len=1000, n_evals_per_epoch=1,\r\n logger_kwargs=dict(), debug=False,\r\n # following are agent related hyperparameters\r\n hidden_sizes=(256, 256), replay_size=int(1e6), batch_size=256,\r\n lr=3e-4, gamma=0.99, polyak=0.995,\r\n alpha=0.2, auto_alpha=True, target_entropy='mbpo',\r\n start_steps=5000, delay_update_steps='auto',\r\n utd_ratio=20, num_Q=10, num_min=2, q_target_mode='min',\r\n policy_update_delay=20,\r\n # following are bias evaluation related\r\n evaluate_bias=True, n_mc_eval=1000, n_mc_cutoff=350, reseed_each_epoch=True, eval=\"deterministic\"\r\n ):\r\n \"\"\"\r\n :param env_name: name of the gym environment\r\n :param seed: random seed\r\n :param epochs: number of epochs to run\r\n :param steps_per_epoch: number of timestep (datapoints) for each epoch\r\n :param max_ep_len: max timestep until an episode terminates\r\n :param n_evals_per_epoch: number of evaluation runs for each epoch\r\n :param logger_kwargs: arguments for logger\r\n :param debug: whether to run in debug mode\r\n :param hidden_sizes: hidden layer sizes\r\n :param replay_size: replay buffer size\r\n :param batch_size: mini-batch size\r\n :param lr: learning rate for all networks\r\n :param gamma: discount factor\r\n :param polyak: hyperparameter for polyak averaged target networks\r\n :param alpha: SAC entropy hyperparameter\r\n :param auto_alpha: whether to use adaptive SAC\r\n :param target_entropy: used for adaptive SAC\r\n :param start_steps: the number of random data collected in the beginning of training\r\n :param delay_update_steps: after how many data collected should we start updates\r\n :param utd_ratio: the update-to-data ratio\r\n :param num_Q: number of Q networks in the Q ensemble\r\n :param num_min: number of sampled Q values to take minimal from\r\n :param q_target_mode: 'min' for minimal, 'ave' for average, 'rem' for random ensemble mixture\r\n :param policy_update_delay: how many updates until we update policy network\r\n \"\"\"\r\n\r\n if eval == 'deterministic':\r\n deterministic = True\r\n elif eval == 'stocastic':\r\n deterministic = False\r\n if debug: # use --debug for very quick debugging\r\n hidden_sizes = [2,2]\r\n batch_size = 2\r\n utd_ratio = 2\r\n num_Q = 3\r\n max_ep_len = 100\r\n start_steps = 100\r\n steps_per_epoch = 100\r\n\r\n # use gpu if available\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n print(device)\r\n # set number of epoch\r\n if epochs == 'mbpo' or epochs < 0:\r\n epochs = mbpo_epoches[env_name]\r\n total_steps = steps_per_epoch * epochs + 1\r\n\r\n \"\"\"set up logger\"\"\"\r\n logger = EpochLogger(**logger_kwargs)\r\n logger.save_config(locals())\r\n\r\n \"\"\"set up environment and seeding\"\"\"\r\n env_fn = lambda: gym.make(env_name)\r\n env, test_env, bias_eval_env = env_fn(), env_fn(), env_fn()\r\n # seed torch and numpy\r\n torch.manual_seed(seed)\r\n np.random.seed(seed)\r\n\r\n # seed environment along with env action space so that everything is properly seeded for reproducibility\r\n def seed_all(epoch):\r\n seed_shift = epoch * 9999\r\n mod_value = 
999999\r\n env_seed = (seed + seed_shift) % mod_value\r\n test_env_seed = (seed + 10000 + seed_shift) % mod_value\r\n bias_eval_env_seed = (seed + 20000 + seed_shift) % mod_value\r\n torch.manual_seed(env_seed)\r\n np.random.seed(env_seed)\r\n env.seed(env_seed)\r\n env.action_space.np_random.seed(env_seed)\r\n test_env.seed(test_env_seed)\r\n test_env.action_space.np_random.seed(test_env_seed)\r\n bias_eval_env.seed(bias_eval_env_seed)\r\n bias_eval_env.action_space.np_random.seed(bias_eval_env_seed)\r\n seed_all(epoch=0)\r\n\r\n \"\"\"prepare to init agent\"\"\"\r\n # get obs and action dimensions\r\n obs_dim = env.observation_space.shape[0]\r\n act_dim = env.action_space.shape[0]\r\n # if environment has a smaller max episode length, then use the environment's max episode length\r\n max_ep_len = env._max_episode_steps if max_ep_len > env._max_episode_steps else max_ep_len\r\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\r\n # we need .item() to convert it from numpy float to python float\r\n act_limit = env.action_space.high[0].item()\r\n # keep track of run time\r\n start_time = time.time()\r\n # flush logger (optional)\r\n sys.stdout.flush()\r\n #################################################################################################\r\n\r\n \"\"\"init agent and start training\"\"\"\r\n agent = REDQSACAgent(env_name, obs_dim, act_dim, act_limit, device,\r\n hidden_sizes, replay_size, batch_size,\r\n lr, gamma, polyak,\r\n alpha, auto_alpha, target_entropy,\r\n start_steps, delay_update_steps,\r\n utd_ratio, num_Q, num_min, q_target_mode,\r\n policy_update_delay)\r\n\r\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\r\n\r\n for t in range(total_steps):\r\n # get action from agent\r\n a = agent.get_exploration_action(o, env)\r\n # Step the env, get next observation, reward and done signal\r\n o2, r, d, _ = env.step(a)\r\n\r\n # Very important: before we let agent store this transition,\r\n # Ignore the \"done\" signal if it comes from hitting the time\r\n # horizon (that is, when it's an artificial terminal signal\r\n # that isn't based on the agent's state)\r\n ep_len += 1\r\n d = False if ep_len == max_ep_len else d\r\n\r\n # give new data to agent\r\n agent.store_data(o, a, r, o2, d)\r\n # let agent update\r\n agent.train(logger)\r\n # set obs to next obs\r\n o = o2\r\n ep_ret += r\r\n\r\n\r\n if d or (ep_len == max_ep_len):\r\n # store episode return and length to logger\r\n logger.store(EpRet=ep_ret, EpLen=ep_len)\r\n # reset environment\r\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\r\n\r\n # End of epoch wrap-up\r\n if (t+1) % steps_per_epoch == 0:\r\n epoch = t // steps_per_epoch\r\n\r\n # Test the performance of the deterministic version of the agent. 
\r\n # Add deterministic/stocastic flag\r\n test_agent(agent, test_env, max_ep_len, logger, deterministic=deterministic) # add logging here\r\n if evaluate_bias:\r\n log_bias_evaluation(bias_eval_env, agent, logger, max_ep_len, alpha, gamma, n_mc_eval, n_mc_cutoff)\r\n\r\n # reseed should improve reproducibility (should make results the same whether bias evaluation is on or not)\r\n if reseed_each_epoch:\r\n seed_all(epoch)\r\n\r\n \"\"\"logging\"\"\"\r\n # Log info about epoch\r\n logger.log_tabular('Epoch', epoch)\r\n logger.log_tabular('TotalEnvInteracts', t)\r\n logger.log_tabular('Time', time.time()-start_time)\r\n logger.log_tabular('EpRet', with_min_and_max=True)\r\n logger.log_tabular('EpLen', average_only=True)\r\n logger.log_tabular('TestEpRet', with_min_and_max=True)\r\n logger.log_tabular('TestEpLen', average_only=True)\r\n logger.log_tabular('Q1Vals', with_min_and_max=True)\r\n logger.log_tabular('LossQ1', average_only=True)\r\n logger.log_tabular('LogPi', with_min_and_max=True)\r\n logger.log_tabular('LossPi', average_only=True)\r\n logger.log_tabular('Alpha', with_min_and_max=True)\r\n logger.log_tabular('LossAlpha', average_only=True)\r\n logger.log_tabular('PreTanh', with_min_and_max=True)\r\n\r\n if evaluate_bias:\r\n logger.log_tabular(\"MCDisRet\", with_min_and_max=True)\r\n logger.log_tabular(\"MCDisRetEnt\", with_min_and_max=True)\r\n logger.log_tabular(\"QPred\", with_min_and_max=True)\r\n logger.log_tabular(\"QBias\", with_min_and_max=True)\r\n logger.log_tabular(\"QBiasAbs\", with_min_and_max=True)\r\n logger.log_tabular(\"NormQBias\", with_min_and_max=True)\r\n logger.log_tabular(\"QBiasSqr\", with_min_and_max=True)\r\n logger.log_tabular(\"NormQBiasSqr\", with_min_and_max=True)\r\n logger.dump_tabular()\r\n\r\n # flush logged information to disk\r\n sys.stdout.flush()\r\n\r\nif __name__ == '__main__':\r\n import argparse\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--env', type=str, default='Hopper-v2')\r\n parser.add_argument('--seed', '-s', type=int, default=0)\r\n parser.add_argument('--epochs', type=int, default=-1) # -1 means use mbpo epochs\r\n parser.add_argument('--exp_name', type=str, default='redq_sac')\r\n parser.add_argument('--data_dir', type=str, default='./data/1-2-2/')\r\n parser.add_argument('--debug', action='store_true')\r\n parser.add_argument('--setting', type=int, default=0)\r\n args = parser.parse_args()\r\n\r\n epochs = 1000\r\n if args.debug:\r\n epochs = 15\r\n\r\n # since we want SAC, set these values so that REDQ becomes SAC, in the original SAC paper, they also do not use adaptive\r\n # SAC entropy, so we set auto_alpha to False, and set alpha=0.2, which is the value they used for Ant and Hopper\r\n # this information can be found in the SAC paper and in their hyperparameter table\r\n utd_ratio = 1\r\n num_Q = 2\r\n auto_alpha = False\r\n alpha=0.2\r\n\r\n env_names = ['Hopper-v2', 'Ant-v2']\r\n buffer_list = [int(1e6), int(5e4), int(1e3)]\r\n seed_list = [0, 1]\r\n\r\n setting_to_run = args.setting\r\n current_setting = 0\r\n\r\n exp_name_prefix = 'sac'\r\n\r\n for buffer_size in buffer_list:\r\n for env_name in env_names:\r\n for seed in seed_list:\r\n if setting_to_run == current_setting: # only happens when setting value matches\r\n # modify the code here if you want to use a different naming scheme\r\n exp_name_full = '%s_poly%s_%s' % (exp_name_prefix, str(buffer_size), env_name)\r\n\r\n # specify experiment name, seed and data_dir.\r\n # for example, for seed 0, the progress.txt will be saved under 
data_dir/exp_name/exp_name_s0\r\n logger_kwargs = setup_logger_kwargs(exp_name_full, seed, args.data_dir)\r\n\r\n redq_sac(env_name, seed=seed, epochs=epochs,\r\n logger_kwargs=logger_kwargs, debug=args.debug,\r\n utd_ratio=utd_ratio, num_Q=num_Q, auto_alpha=auto_alpha, alpha=alpha, replay_size=buffer_size)\r\n current_setting += 1\r\n"
] | [
[
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.random.seed"
]
] |
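The experiment script above deliberately collapses REDQ to plain SAC (utd_ratio=1, num_Q=2, fixed alpha), but the defining REDQ ingredient is still the critic target: take the minimum over a small random subset of the Q ensemble. A toy NumPy sketch of that target computation follows, with made-up Q estimates standing in for the critic networks; shapes and names are assumptions for illustration.

```python
import numpy as np

def redq_target(q_next, rewards, dones, num_min=2, gamma=0.99, rng=None):
    """Bellman target using the min over `num_min` randomly sampled ensemble members.

    q_next: (num_Q, batch) next-state Q estimates, one row per critic in the ensemble.
    """
    rng = np.random.default_rng() if rng is None else rng
    idx = rng.choice(q_next.shape[0], size=num_min, replace=False)  # random subset of critics
    q_min = q_next[idx].min(axis=0)                                 # elementwise min over the subset
    return rewards + gamma * (1.0 - dones) * q_min

rng = np.random.default_rng(0)
q_next = rng.normal(size=(10, 4))            # num_Q=10 critics, batch of 4 transitions
rewards = np.ones(4)
dones = np.array([0.0, 0.0, 1.0, 0.0])
print(redq_target(q_next, rewards, dones, num_min=2, rng=rng))
```

With num_Q=2 and num_min=2 the subset is always the whole ensemble, which recovers the clipped-double-Q target of standard SAC, matching the intent of the settings in the script.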
RamiSketcher/AMMI-RL | [
"6d51587ff4d5dc14cba87fca561bd7b340b44586"
] | [
"rl/algorithms/mbrl/mopac.py"
] | [
"import os, subprocess, sys\nimport argparse\nimport importlib\nimport datetime\nimport random\n\nimport time\nimport wandb\n\nimport numpy as np\nimport torch as T\nimport torch.nn.functional as F\n\n# T.multiprocessing.set_sharing_strategy('file_system')\n\nfrom rl.algorithms.mbrl.mbrl import MBRL\nfrom rl.algorithms.mfrl.sac import SAC\nfrom rl.world_models.fake_world import FakeWorld\nimport rl.environments.mbpo.static as mbpo_static\n# from rl.data.dataset import RLDataModule\n\n\n\n\n\nclass MoPAC(MBRL, SAC, MBPO):\n \"\"\"\n Algorithm: Model-Predictive Actor-Critic (Dyna-style, MPC, Model-Based)\n\n 1: Initialize policy πφ, predictive model pθ, environment dataset Denv, model dataset Dmodel\n 2: for N epochs do\n 3: Train model pθ on Denv via maximum likelihood\n 4: for E steps do\n 5: Take action in environment according to πφ; add to Denv\n 6: for M model rollouts do\n 7: Sample st uniformly from Denv\n 8: Perform k-step model rollout starting from st using policy πφ; add to Dmodel\n 9: for G gradient updates do\n 10: Update policy parameters on model data: φ ← φ − λπ ˆ∇φ Jπ(φ, Dmodel)\n\n \"\"\"\n def __init__(self, exp_prefix, configs, seed, device, wb) -> None:\n super(MoPAC, self).__init__(exp_prefix, configs, seed, device)\n # print('init MoPAC Algorithm!')\n self.configs = configs\n self.seed = seed\n self._device_ = device\n self.WandB = wb\n self._build()\n\n\n ## build MoPAC components: (env, D, AC, alpha)\n def _build(self):\n super(MoPAC, self)._build()\n self._set_sac()\n self._set_fake_world()\n\n\n ## SAC\n def _set_sac(self):\n SAC._build_sac(self)\n\n\n ## FakeEnv\n def _set_fake_world(self):\n env_name = self.configs['environment']['name']\n device = self._device_\n if self.configs['environment']['name'][:4] == 'pddm':\n \tstatic_fns = None\n else:\n \tstatic_fns = mbpo_static[env_name[:-3].lower()]\n\n # self.fake_world = FakeWorld(self.world_model, static_fns, env_name, self.learn_env, self.configs, device)\n self.fake_world = FakeWorld(self.world_model)\n\n\n def learn(self):\n N = self.configs['algorithm']['learning']['epochs']\n NT = self.configs['algorithm']['learning']['epoch_steps']\n Ni = self.configs['algorithm']['learning']['init_epochs']\n Nx = self.configs['algorithm']['learning']['expl_epochs']\n\n E = self.configs['algorithm']['learning']['env_steps']\n G_sac = self.configs['algorithm']['learning']['grad_SAC_steps']\n\n # batch_size = self.configs['data']['batch_size']\n\n model_train_frequency = self.configs['world_model']['model_train_freq']\n batch_size_m = self.configs['world_model']['network']['batch_size'] # bs_m\n wm_epochs = self.configs['algorithm']['learning']['grad_WM_steps']\n real_ratio = self.configs['data']['real_ratio'] # rr\n batch_size = self.configs['data']['batch_size'] # bs\n batch_size_ro = self.configs['data']['rollout_batch_size'] # bs_ro\n\n o, Z, el, t = self.learn_env.reset(), 0, 0, 0\n # o, Z, el, t = self.initialize_learning(NT, Ni)\n # oldJs = [0, 0, 0]\n # JQList, JAlphaList, JPiList = [0], [0], [0]\n # AlphaList = [self.alpha]*Ni\n\n # JTrainList, JValList, LossTestList = [0], [0], [0]\n # WMList = {'mu': [0]*Ni, 'sigma': [0]*Ni}\n # JMeanTrainList, JTrainList, JMeanValList, JValList = [], [], [], []\n # LossTestList = []\n # WMList = {'mu': [], 'sigma': []}\n\n logs = dict()\n lastEZ, lastES = 0, -2\n K = 1\n\n start_time_real = time.time()\n for n in range(1, N+1):\n if self.configs['experiment']['print_logs']:\n print('=' * 50)\n if n > Nx:\n print(f'\\n[ Epoch {n} Learning ]'+(' '*50))\n # JQList, JPiList = [], 
[]\n # JTrainList, JValList, LossTestList = [], [], []\n oldJs = [0, 0, 0]\n JQList, JAlphaList, JPiList = [0], [0], [0]\n JTrainList, JValList, LossTestList = [0], [0], [0]\n elif n > Ni:\n print(f'\\n[ Epoch {n} Exploration + Learning ]'+(' '*50))\n JQList, JPiList = [], []\n JTrainList, JValList, LossTestList = [], [], []\n else:\n print(f'\\n[ Epoch {n} Inintial Exploration ]'+(' '*50))\n oldJs = [0, 0, 0]\n JQList, JAlphaList, JPiList = [0], [0], [0]\n JTrainList, JValList, LossTestList = [0], [0], [0]\n\n print(f'[ Replay Buffer ] Size: {self.buffer.size}')\n nt = 0\n learn_start_real = time.time()\n while nt < NT: # full epoch\n # Interaction steps\n for e in range(1, E+1):\n o, Z, el, t = self.internact(n, o, Z, el, t)\n # print('Return: ', Z)\n\n # Taking gradient steps after exploration\n if n > Ni:\n if nt % model_train_frequency == 0:\n #03. Train model pθ on Denv via maximum likelihood\n # PyTorch Lightning Model Training\n print(f'\\n[ Epoch {n} Training World Model ]'+(' '*50))\n # print(f'\\n\\n[ Training ] Dynamics Model(s), mEpochs = {mEpochs}\n # self.data_module = RLDataModule(self.buffer, self.configs['data'])\n\n # JTrainLog, JValLog, LossTest = self.fake_world.train(self.data_module)\n # JTrainList.append(JTrainLog)\n # JValList.append(JValLog)\n # LossTestList.append(LossTest)\n\n ho_mean = self.fake_world.train_fake_world(self.buffer)\n JValList.append(ho_mean) # ho: holdout\n\n # Update K-steps length\n K = self.set_rollout_length(n)\n\n # Reallocate model buffer\n # if K != K_new:\n # K = K_new\n self.reallocate_model_buffer(batch_size_ro, K, NT, model_train_frequency)\n\n # Generate M k-steps imaginary rollouts for SAC traingin\n self.rollout_world_model(batch_size_ro, K, n)\n\n # JQList, JPiList = [], []\n # AlphaList = [self.alpha]*G_sac\n for g in range(1, G_sac+1): # it was \"for g in (1, G_sac+1):\" for 2 months, and I did't notice!! 
;(\n # print(f'Actor-Critic Grads...{g}', end='\\r')\n print(f'[ Epoch {n} Training Actor-Critic ] Env Steps: {nt+1} | AC Grads: {g} | Return: {round(Z, 2)}'+(\" \"*10), end='\\r')\n ## Sample a batch B_sac\n B_sac = self.sac_batch(real_ratio, batch_size)\n ## Train networks using batch B_sac\n Jq, Jalpha, Jpi = self.trainAC(g, B_sac, oldJs)\n oldJs = [Jq, Jalpha, Jpi]\n JQList.append(Jq.item())\n JPiList.append(Jpi.item())\n if self.configs['actor']['automatic_entropy']:\n JAlphaList.append(Jalpha.item())\n AlphaList.append(self.alpha)\n\n nt += E\n\n print('\\n')\n # logs['time/training '] = time.time() - learn_start_real\n\n # logs['training/wm/Jtrain_mean '] = np.mean(JMeanTrainList)\n # logs['training/wm/Jtrain '] = np.mean(JTrainList)\n logs['training/wm/Jval '] = np.mean(JValList)\n # logs['training/wm/test_mse '] = np.mean(LossTestList)\n\n logs['training/sac/Jq '] = np.mean(JQList)\n logs['training/sac/Jpi '] = np.mean(JPiList)\n if self.configs['actor']['automatic_entropy']:\n logs['training/obj/sac/Jalpha '] = np.mean(JAlphaList)\n logs['training/obj/sac/alpha '] = np.mean(AlphaList)\n\n logs['data/env_buffer '] = self.buffer.size\n if hasattr(self, 'model_buffer'):\n logs['data/model_buffer '] = self.model_buffer.size\n else:\n logs['data/model_buffer '] = 0\n logs['data/rollout_length '] = K\n\n eval_start_real = time.time()\n EZ, ES, EL = self.evaluate()\n\n # logs['time/evaluation '] = time.time() - eval_start_real\n\n if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':\n logs['evaluation/episodic_score_mean '] = np.mean(ES)\n logs['evaluation/episodic_score_std '] = np.std(ES)\n else:\n logs['evaluation/episodic_return_mean'] = np.mean(EZ)\n logs['evaluation/episodic_return_std '] = np.std(EZ)\n logs['evaluation/episodic_length_mean'] = np.mean(EL)\n\n logs['time/total '] = time.time() - start_time_real\n\n # if n > (N - 50):\n # if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':\n # if np.mean(ES) > lastES:\n # print(f'[ Epoch {n} Agent Saving ] ')\n # env_name = self.configs['environment']['name']\n # alg_name = self.configs['algorithm']['name']\n # T.save(self.actor_critic.actor,\n # f'./saved_agents/{env_name}-{alg_name}-seed:{self.seed}-epoch:{n}.pTtar')\n # lastES = np.mean(ES)\n # else:\n # if np.mean(EZ) > lastEZ:\n # print(f'[ Epoch {n} Agent Saving ] ')\n # env_name = self.configs['environment']['name']\n # alg_name = self.configs['algorithm']['name']\n # T.save(self.actor_critic.actor,\n # f'./saved_agents/{env_name}-{alg_name}-seed:{self.seed}-epoch:{n}.pTtar')\n # lastEZ = np.mean(EZ)\n\n # Printing logs\n if self.configs['experiment']['print_logs']:\n for k, v in logs.items():\n print(f'{k} {round(v, 2)}'+(' '*10))\n\n # WandB\n if self.WandB:\n wandb.log(logs)\n\n self.learn_env.close()\n self.eval_env.close()\n\n\n def set_rollout_length(self, n):\n if self.configs['world_model']['rollout_schedule'] == None:\n \tK = 1\n else:\n \tmin_epoch, max_epoch, min_length, max_length = self.configs['world_model']['rollout_schedule']\n\n \tif n <= min_epoch:\n \t\tK = min_length\n \telse:\n \t\tdx = (n - min_epoch) / (max_epoch - min_epoch)\n \t\tdx = min(dx, 1)\n \t\tK = dx * (max_length - min_length) + min_length\n\n K = int(K)\n return K\n\n\n def rollout_world_model(self, batch_size_ro, K, n):\n \t# 07. 
Sample st uniformly from Denv\n \tdevice = self._device_\n \tbatch_size = min(batch_size_ro, self.buffer.size)\n \tprint(f'[ Epoch {n} Model Rollout ] Batch Size: {batch_size} | Rollout Length: {K}'+(' '*50))\n \tB_ro = self.buffer.sample_batch(batch_size) # Torch\n \tO = B_ro['observations']\n\n \t# 08. Perform k-step model rollout starting from st using policy πφ; add to Dmodel\n \tfor k in range(1, K+1):\n \t\tA = self.actor_critic.get_action(O) # Stochastic action | No reparameterization\n\n \t\tO_next, R, D, _ = self.fake_world.step(O, A) # ip: Tensor, op: Tensor\n \t\t# O_next, R, D, _ = self.fake_world.step_np(O, A) # ip: Tensor, op: Numpy\n\n \t\t# self.model_buffer.store_batch(O.numpy(), A, R, O_next, D) # ip: Numpy\n \t\tself.model_buffer.store_batch(O, A, R, O_next, D) # ip: Tensor\n\n \t\tO_next = T.Tensor(O_next)\n \t\tD = T.tensor(D, dtype=T.bool)\n \t\t# nonD = ~D\n \t\tnonD = ~D.squeeze(-1)\n\n \t\tif nonD.sum() == 0:\n \t\t print(f'[ Epoch {n} Model Rollout ] Breaking early: {k} | {nonD.sum()} / {nonD.shape}')\n \t\t break\n\n \t\tO = O_next[nonD]\n\n\n def sac_batch(self, real_ratio, batch_size):\n \tbatch_size_real = int(real_ratio * batch_size) # 0.05*256\n \tbatch_size_img = batch_size - batch_size_real # 256 - (0.05*256)\n \tB_real = self.buffer.sample_batch(batch_size_real, self._device_)\n\n \tif batch_size_img > 0:\n \t\tB_img = self.model_buffer.sample_batch(batch_size_img, self._device_)\n \t\tkeys = B_real.keys()\n \t\tB = {k: T.cat((B_real[k], B_img[k]), dim=0) for k in keys}\n \telse:\n \t\tB = B_real\n \treturn B\n\n\n\n\n\ndef main(exp_prefix, config, seed, device, wb):\n\n print('Start an MoPAC experiment...')\n print('\\n')\n\n configs = config.configurations\n\n if seed:\n random.seed(seed), np.random.seed(seed), T.manual_seed(seed)\n\n alg_name = configs['algorithm']['name']\n env_name = configs['environment']['name']\n env_type = configs['environment']['type']\n wm_epochs = configs['algorithm']['learning']['grad_WM_steps']\n DE = configs['world_model']['num_ensembles']\n\n group_name = f\"{env_name}-{alg_name}-X\"\n exp_prefix = f\"seed:{seed}\"\n\n if wb:\n wandb.init(\n name=exp_prefix,\n group=group_name,\n # project='test',\n project='AMMI-RL-2022',\n config=configs\n )\n\n agent = MoPAC(exp_prefix, configs, seed, device, wb)\n\n agent.learn()\n\n print('\\n')\n print('... End the MoPAC experiment')\n\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-exp_prefix', type=str)\n parser.add_argument('-cfg', type=str)\n parser.add_argument('-seed', type=str)\n parser.add_argument('-device', type=str)\n parser.add_argument('-wb', type=str)\n\n args = parser.parse_args()\n\n exp_prefix = args.exp_prefix\n sys.path.append(f\"{os.getcwd()}/configs\")\n config = importlib.import_module(args.cfg)\n seed = int(args.seed)\n device = args.device\n wb = eval(args.wb)\n\n main(exp_prefix, config, seed, device, wb)\n"
] | [
[
"torch.cat",
"numpy.random.seed",
"numpy.mean",
"torch.manual_seed",
"numpy.std",
"torch.tensor",
"torch.Tensor"
]
] |
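The set_rollout_length method in the row above linearly anneals the model-rollout horizon K between a minimum and maximum epoch, in the style of MBPO's rollout schedule. Here is a dependency-free restatement of that schedule, assuming the same 4-tuple config format; the example schedule values are illustrative, not the repo's configuration.

```python
def rollout_length(epoch, schedule):
    """Linearly interpolate the model-rollout horizon K.

    schedule = (min_epoch, max_epoch, min_length, max_length); returns an int K.
    A schedule of None means fixed single-step rollouts.
    """
    if schedule is None:
        return 1
    min_epoch, max_epoch, min_length, max_length = schedule
    if epoch <= min_epoch:
        return int(min_length)
    frac = min((epoch - min_epoch) / (max_epoch - min_epoch), 1.0)  # clamp to [0, 1]
    return int(frac * (max_length - min_length) + min_length)

# An illustrative schedule: K grows from 1 to 15 between epochs 20 and 100, then saturates.
for n in (1, 20, 60, 100, 300):
    print(n, rollout_length(n, (20, 100, 1, 15)))
```

Growing K slowly keeps short, low-error model rollouts early in training (when the learned dynamics model is poor) and only lengthens them once the model has seen enough real data.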
yushdotkapoor/Crypto-Siphon | [
"75da6842be8224ac283ba43489a9e1bec38e08c1"
] | [
"robinhood_starter.py"
] | [
"#robinhood_starter.py\n# Copyright Yush Raj Kapoor\n# Created 08/13/2021\n\n\n\nimport robin_stocks.robinhood as rs\nimport os\nfrom time import sleep\nfrom robin_obfuscate import *\nimport math\nimport pyrebase\nimport sys\nimport time\nfrom threading import Timer\nimport signal\nfrom numpy import *\nfrom playsound import playsound\nimport pandas as pd\nfrom pandas import read_csv\nfrom pandas import to_datetime\nfrom pandas import DataFrame\nfrom prophet import Prophet\nimport numpy as np\nimport datetime\nimport robin_creds\n\n\nconfig = robin_creds.config\n\nfirebase = pyrebase.initialize_app(config)\n\nref = firebase.database()\n\nbuy_price = 0\nsell_price = 0\n\nrobin_user = robin_creds.robin_user\nrobin_pass = robin_creds.robin_pass\n\nMAXIMUM_VALUE = 0\nMINIMUM_VALUE = 0\n\nprev_action = \"none\"\ntarget_price = 0\n\nTicker = \"\"\n\ncoins = 0\ncash = 10\n\ndata_points = 240\nslope_points = 30\nslope_tracker = []\n\n\ngolden_ratio = 1.61803398875\n\nROOT_PERC = 0.004\n\nSELLING_PERCENTAGE_MAIN = ROOT_PERC\nBUYING_PERCENTAGE_MAIN = ROOT_PERC\n\nselling_const = ROOT_PERC\nbuying_const = ROOT_PERC\n\n\ntarget_reached = False\nsafe_reached = False\n\ntracker = []\nforecasting_pts = 1000\n\nE_forecast = []\nQ_forecast = []\nH_forecast = []\nF_forecast = [0.25]\n\nfirst = True\n\nMock = True\n\nQUIT = False\n\ndef reset_High_Lows(min_val, max_val):\n global MAXIMUM_VALUE\n global MINIMUM_VALUE\n MAXIMUM_VALUE = max_val\n MINIMUM_VALUE = min_val\n \n \ndef get_forecasts():\n #a = [\"E_forecast\", \"Q_forecast\", \"H_forecast\", \"F_forecast\"]\n a = [\"F_forecast\"]\n for i in a:\n forecast(i)\n \n\ndef forecast(root):\n global E_forecast\n global Q_forecast\n global H_forecast\n global F_forecast\n a = {\"E_forecast\":8, \"Q_forecast\":4, \"H_forecast\":2, \"F_forecast\":1}\n use_data = tracker[-int(len(tracker)/a[root]):]\n times = []\n cur_time = time.time()\n \n ct = cur_time - (len(use_data) * 15)\n for i in use_data:\n times.append(datetime.datetime.fromtimestamp(ct))\n ct += 15\n \n data = {'Month':times, 'Price':use_data}\n\n df = pd.DataFrame(data)\n df.columns = ['ds', 'y']\n model = Prophet()\n model.daily_seasonality=True\n model.yearly_seasonality = False\n model.weekly_seasonality = False\n with suppress_stdout_stderr():\n model.fit(df)\n\n future = list()\n for i in range(1, 20):\n date = time.time() + (i * 15)\n future.append([datetime.datetime.fromtimestamp(date)])\n\n future = DataFrame(future)\n future.columns = ['ds']\n\n forecast = model.predict(future)\n \n if root == \"E_forecast\":\n E_forecast = forecast['yhat'].to_numpy()\n elif root == \"Q_forecast\":\n Q_forecast = forecast['yhat'].to_numpy()\n elif root == \"H_forecast\":\n H_forecast = forecast['yhat'].to_numpy()\n elif root == \"F_forecast\":\n F_forecast = forecast['yhat'].to_numpy()\n\n\ndef calculate_trend_slope(data):\n sum = 0\n av_price = ((buy_price + sell_price)/2)\n for i in range(1, len(data)):\n cur = data[i]\n prev = data[i-1]\n sum += (cur - prev) / av_price\n \n return sum / len(data)\n \n \ndef current_coins():\n positions = rs.get_crypto_positions()\n for n in positions:\n if n['currency']['code'] == Ticker:\n coins = float(n['cost_bases'][0]['direct_quantity'])\n return coins\n \n return 0\n \n \ndef get_min_max_of_tracker():\n min = 999999\n max = 0\n max_index = -1\n min_index = -1\n new = tracker[-data_points:]\n for n in list(range(len(new))):\n val = new[n]\n if val > max:\n max = val\n max_index = n\n elif val < min:\n min = val\n min_index = n\n \n return (min, max, min_index, 
max_index)\n \n \n\ndef trend(arr):\n lean = 0\n av_price = (buy_price + sell_price) / 2\n for n in arr:\n if av_price > n:\n #support for using min for reference\n lean += 1\n elif av_price < n:\n #support for using max for reference\n lean -= 1\n \n return lean / len(arr)\n \n\ndef calculate_immediate_slopes():\n immediate = tracker[-4]\n semi_immediate = tracker[-20]\n \n av_price = (buy_price + sell_price) / 2\n im_slope = (av_price - immediate) / av_price\n s_im_slope = (av_price - semi_immediate) / av_price\n \n return im_slope, s_im_slope\n \n \ndef volatility(arr):\n prev_tr = 0\n trend_volatility = 0\n for i in arr:\n if i > 0.1:\n if prev_tr == -1:\n trend_volatility += 1\n prev_tr = 1\n elif i < -0.1:\n if prev_tr == 1:\n trend_volatility += 1\n prev_tr = -1\n elif i != 0:\n prev_tr = i/abs(i)\n \n return trend_volatility\n \n \ndef action():\n global selling_const\n global buying_const\n \n tr1 = trend(tracker[-int(data_points/8):])\n tr2 = trend(tracker[-int(data_points/4):])\n tr3 = trend(tracker[-int(data_points/2):])\n tr4 = trend(tracker[-int(data_points/4*3):])\n tr5 = trend(tracker[-int(data_points):])\n \n a = [tr2, tr3, tr4, tr5]\n trend_volatility = volatility(a)\n is_high_volatility = False\n if trend_volatility > 1:\n is_high_volatility = True\n \n lean = (tr1 * 0.4) + (tr2 * 0.3) + (tr3 * 0.2) + (tr5 * 0.1)\n linear_tr = (tr2 + tr3 + tr4 + tr5) / 4\n \n \n immediate_slope, semi_immediate_slope = calculate_immediate_slopes()\n \n# E_tr = calculate_trend_slope(E_forecast)\n# Q_tr = calculate_trend_slope(Q_forecast)\n# H_tr = calculate_trend_slope(H_forecast)\n F_tr = calculate_trend_slope(F_forecast)\n# scaled_forcasted_slope = (E_tr * 0.1) + (Q_tr * 0.2) + (H_tr * 0.3) + (F_tr * 0.4)\n \n print(\"date\", datetime.datetime.now())\n print(\"usage_tr\",linear_tr)\n print(\"immediate_slope\", immediate_slope)\n print(\"usage_forecasted_slope\", F_tr)\n \n tr_gate = 0\n slope_gate = 0\n forecast_gate = 0\n to_act = \"None\"\n if prev_action == \"buy\":\n #looking to sell\n if linear_tr < -0.1:\n tr_gate = linear_tr * 0.1\n if immediate_slope < -0.0002:\n slope_gate = immediate_slope * 0.4\n if F_tr < -0.0001:\n forecast_gate = F_tr * 0.4\n to_act = \"sell\"\n elif prev_action == \"sell\":\n #looking to buy\n if linear_tr > 0.1:\n tr_gate = linear_tr * 0.1\n if immediate_slope > 0.0002:\n slope_gate = immediate_slope * 0.4\n if F_tr > 0.0001:\n forecast_gate = F_tr * 0.4\n to_act = \"buy\"\n \n print(\"tr_gate\", tr_gate)\n print(\"slope_gate\", slope_gate * 200)\n print(\"forecast_gate\", forecast_gate * 2000)\n \n \n min_max = get_min_max_of_tracker()\n min = min_max[0]\n max = min_max[1]\n min_index = min_max[2]\n max_index = min_max[3]\n \n min_delta = buy_price - min\n max_delta = buy_price - max\n \n min_tr = min_delta / buy_price / (data_points - min_index) * 10 * golden_ratio\n max_tr = max_delta / buy_price / (data_points - max_index) * 10 * golden_ratio\n \n print(\"lean\",lean)\n \n tr = max_tr\n if lean > 0:\n tr = min_tr\n \n selling_const = SELLING_PERCENTAGE_MAIN + tr\n buying_const = BUYING_PERCENTAGE_MAIN - tr\n \n if selling_const < 0.002:\n selling_const = 0.002\n if buying_const < 0.002:\n buying_const = 0.002\n \n \n total_gate = (tr_gate + (200 * slope_gate) + (2000 * forecast_gate))\n if to_act == \"buy\":\n if lean > 0:\n total_gate = total_gate * golden_ratio\n elif to_act == \"sell\":\n if lean < 0:\n total_gate = total_gate * golden_ratio\n \n print(\"COMPARE\",total_gate)\n if (abs(total_gate) > 0.4):\n if (prev_action == \"sell\" and 
total_gate > 0) or (prev_action == \"buy\" and total_gate < 0) or (first and total_gate > 0):\n first = False\n print(to_act + \" FROM COMPARE\")\n return to_act\n \n \n# #Gradient purchase\n# if prev_action == \"buy\":\n# #Looking for a selling point\n# adjusted_price = sell_price * 1.001\n# percent_difference = ((MAXIMUM_VALUE - adjusted_price) / adjusted_price)\n# print(\"MAXIMUM_VALUE {} difference {} selling_const {}\".format(MAXIMUM_VALUE, percent_difference, selling_const))\n# if percent_difference > selling_const:\n# #SELL SELL SELL\n# print(\"SELL FROM GRADIENT\", percent_difference, \">\", selling_const)\n# return \"sell\"\n# elif prev_action == \"sell\":\n# #Looking for a buying point\n# adjusted_price = buy_price * 1.001\n# percent_difference = ((adjusted_price - MINIMUM_VALUE) / adjusted_price)\n# print(\"MINIMUM_VALUE {} difference {} buying_const {}\".format(MINIMUM_VALUE, percent_difference, buying_const))\n# if percent_difference > buying_const:\n# #BUY BUY BUY\n# print(\"BUY FROM GRADIENT\", percent_difference, \">\", buying_const)\n# return \"buy\"\n \n \n #Emergency sell\n if safe_reached:\n if sell_price * 0.9995 < target_price:\n print(\"Emergency sell\")\n print(\"sell_price * 0.9995\", sell_price * 0.9995, \"target_price\",target_price)\n return \"sell\"\n \n return \"None\"\n \n \ndef set_slopes(baseline):\n global slope_tracker\n slope_tracker = []\n for n in list(range(slope_points)):\n slope_tracker.append(baseline)\n \n \ndef update_min_max():\n global MAXIMUM_VALUE\n global MINIMUM_VALUE\n \n if sell_price > MAXIMUM_VALUE:\n MAXIMUM_VALUE = sell_price\n \n if buy_price < MINIMUM_VALUE:\n MINIMUM_VALUE = buy_price\n \n \ndef update_slope():\n global slope_tracker\n for n in list(range(1, slope_points)):\n val = slope_tracker[n]\n slope_tracker[n - 1] = val\n \n slope_tracker[slope_points - 1] = buy_price\n \n \ndef get_min_max_of_slopes():\n min = 999999\n max = 0\n max_index = -1\n min_index = -1\n \n for n in list(range(len(slope_tracker))):\n val = slope_tracker[n]\n if val > max:\n max = val\n max_index = n\n elif val < min:\n min = val\n min_index = n\n \n return (min, max, min_index, max_index)\n \n\n\ndef reset_target_bool():\n global target_reached\n global safe_reached\n target_reached = False\n safe_reached = False\n \n \ndef check_tresholds():\n global target_reached\n global safe_reached\n target_tag = \"\"\n safe_tag = \"\"\n if target_reached:\n target_tag = \"REACHED\"\n if safe_reached:\n safe_tag = \"REACHED\"\n print(\"target price\",target_price, target_tag)\n print(\"safe threshold\", target_price * 1.002, safe_tag)\n if prev_action == \"buy\":\n if buy_price > target_price and not target_reached:\n target_reached = True\n playsound('Crypto-Siphon/target.mp3')\n if buy_price > (target_price * 1.001) and not safe_reached:\n safe_reached = True\n playsound('Crypto-Siphon/safeThreshold.mp3')\n\n\ndef one():\n global prev_action\n global cash\n global coins\n global buy_price\n global sell_price\n\n buy_price = float(rs.get_crypto_quote(Ticker)['ask_price'])\n sell_price = float(rs.get_crypto_quote(Ticker)['bid_price'])\n \n \n ref.child(\"Time/robinhood_\" + Ticker + \"_one\").set(time.time())\n quit = ref.child(\"engine_status/robinhood_\" + Ticker + \"_quit\").get().val()\n if quit == 1:\n quitter()\n ref.child(\"engine_status/robinhood_\" + Ticker).set(1)\n if not Mock:\n coins = current_coins()\n \n update_slope()\n update_min_max()\n \n# least = 0.0000000000001\n# l_rounded = round_up(least, round_quantity()) - least\n# if coins > 
l_rounded:\n# prev_action = \"buy\"\n# cash = 0\n# else:\n# prev_action = \"sell\"\n \n \n \n if QUIT:\n quitter()\n else:\n Timer(1, one).start()\n \n \ndef five():\n Timer(0.1, get_forecasts).start()\n \n ref.child(\"Time/robinhood_\" + Ticker + \"_five\").set(time.time())\n\n todo = action()\n check_tresholds()\n if Mock:\n mock_transaction(todo)\n else:\n make_transaction(todo)\n \n \n if QUIT:\n quitter()\n else:\n Timer(5, five).start()\n \n \n#def make_transaction(str):\n# global cash\n# global coins\n# global prev_action\n# global target_price\n# global MINIMUM_VALUE\n# global MAXIMUM_VALUE\n#\n# if str == 'buy':\n# coin_to_buy = float(cash / buy_price) * 0.999\n# buy_data = buy(coin_to_buy)\n# prev_action = str\n# data = gem.get_trades_for_crypto(Ticker)[0].json()[0]\n# coins = float(data['amount'])\n# executed_cash = float(data['amount']) * float(data['price'])\n# executed_fee = float(data['fee_amount'])\n# cash -= (executed_cash + executed_fee)\n# MINIMUM_VALUE = buy_price\n# target_price = float(rs.get_crypto_quote(Ticker)['ask_price'])) * 1.002\n# reset_target_bool()\n# reset_High_Lows(sell_price, buy_price)\n# print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(str.upper(), sell_price, coins * sell_price))\n# print(\"SELL AT ${}\".format(target_price))\n# elif str == 'sell':\n# sell_data = sell()\n# data = gem.get_trades_for_crypto(Ticker)[0].json()[0]\n# prev_action = str\n# coins = data['amount']\n# executed_cash = float(data['amount']) * float(data['price'])\n# executed_fee = float(data['fee_amount'])\n# cash += (executed_cash + executed_fee)\n# MAXIMUM_VALUE = float(rs.get_crypto_quote(Ticker)['bid_price']))\n# target_price = float(rs.get_crypto_quote(Ticker)['bid_price'])) * 1.002\n# reset_target_bool()\n# reset_High_Lows(sell_price, buy_price)\n# print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(str.upper(), buy_price, cash))\n# else:\n# status = \"NONE\"\n# pros = cash\n# price = buy_price\n# if cash == 0:\n# status = \"HELD\"\n# price = sell_price\n# pros = coins * buy_price\n#\n# print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(status, price, pros))\n \n \ndef mock_transaction(str):\n#####THIS IS A MOCK\n#####NO MONEY IS USED HERE WHEN GLOBAL VAR 'MOCK' IS TRUE\n global cash\n global coins\n global prev_action\n global target_price\n global MINIMUM_VALUE\n global MAXIMUM_VALUE\n \n if str == 'buy':\n prev_action = str\n executed_cash = cash\n executed_fee = round(cash * 0.001, 2)\n cash -= executed_cash\n coins += (executed_cash - executed_fee) / buy_price\n MINIMUM_VALUE = buy_price\n target_price = buy_price * 1.002\n reset_target_bool()\n reset_High_Lows(sell_price, buy_price)\n print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(str.upper(), sell_price, coins * sell_price))\n playsound('Crypto-Siphon/Buy.mp3')\n elif str == 'sell':\n prev_action = str\n executed_cash = coins * sell_price\n executed_fee = round(executed_cash * 0.001, 2)\n coins -= executed_cash / sell_price\n cash += (executed_cash - executed_fee)\n MAXIMUM_VALUE = sell_price\n target_price = sell_price * 1.002\n reset_target_bool()\n reset_High_Lows(sell_price, buy_price)\n print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(str.upper(), buy_price, cash))\n playsound('Crypto-Siphon/Sell.mp3')\n else:\n status = \"NONE\"\n pros = cash\n price = buy_price\n if cash == 0:\n status = \"HELD\"\n price = sell_price\n pros = coins * buy_price\n \n print(\"status {}\\nPrice {}\\nProspects {}\\n\".format(status, price, pros))\n \n\ndef set_tracker(baseline):\n global tracker\n 
tracker = []\n for n in list(range(forecasting_pts)):\n tracker.append(baseline)\n\n\ndef fill_tracker_fast(ct):\n av_price = (float(rs.get_crypto_quote(Ticker)['ask_price'])) + float(rs.get_crypto_quote(Ticker)['bid_price']) / 2\n update_trackers(av_price)\n for n in list(range(1, len(tracker))):\n val = tracker[n]\n tracker[n - 1] = val\n \n tracker[len(tracker) - 1] = av_price\n if ct > 0:\n Timer(1, fill_tracker_fast(ct - 1)).start()\n \n \ndef start():\n global tracker\n global MINIMUM_VALUE\n global MAXIMUM_VALUE\n global target_price\n rs.login(username=robin_user, password=robin_pass, expiresIn=86400, by_sms=True)\n \n# if current_coins() > 1:\n# data = gem.get_trades_for_crypto(Ticker)[0].json()[0]\n# p = float(data['price'])\n# target_price = p * 1.001\n \n print(\"STARTING ROBINHOOD ENGINE FOR\", Ticker)\n \n buy_price = float(rs.get_crypto_quote(Ticker)['ask_price'])\n sell_price = float(rs.get_crypto_quote(Ticker)['bid_price'])\n set_tracker(buy_price)\n set_slopes(buy_price)\n MAXIMUM_VALUE = buy_price\n MINIMUM_VALUE = sell_price\n \n prev_fore = ref.child(\"Forecast/robinhood_\" + Ticker).get().val()\n if prev_fore != None:\n tracker = prev_fore\n else:\n ref.child(\"Forecast/robinhood_\" + Ticker).set(tracker)\n fill_tracker_fast(data_points)\n \n \n ref.child(\"Time/robinhood_\" + Ticker + \"_one\").set(time.time())\n ref.child(\"Time/robinhood_\" + Ticker + \"_five\").set(time.time())\n ref.child(\"engine_status/robinhood_\" + Ticker + \"_quit\").set(0)\n \n Timer(5, five).start()\n Timer(1, one).start()\n \n \n \ndef buy(price):\n rs.orders.order_buy_crypto_by_price(Ticker, price, timeInForce='gtc')\n print(\"Buying \" + str(price) + \" of \" + Ticker)\n playsound('Crypto-Siphon/Buy.mp3')\n\n\ndef sell():\n coins = current_coins()\n bid_price = float(rs.get_crypto_quote(Ticker)['bid_price'])\n price_to_sell = bid_price * coins\n rs.order_sell_crypto_by_quantity(Ticker, coins)\n print(\"Selling $\" + str(price_to_sell) + \" of \" + Ticker + \" at \" + str(bid_price) + \" each\")\n playsound('Crypto-Siphon/Sell.mp3')\n \n \n \ndef quitter():\n global QUIT\n QUIT = True\n if Ticker != None:\n ref.child(\"engine_status/robinhood_\" + Ticker).set(0)\n ref.child(\"engine_status/robinhood_\" + Ticker + \"_quit\").set(0)\n os._exit(os.EX_OK)\n \n \ndef handler(signum, frame):\n print('KeyboardInturrupt')\n print('Process Manually Stopped')\n quitter()\n\nsignal.signal(signal.SIGINT, handler)\n \n \nif __name__ == '__main__':\n Ticker = input('Enter Crypto Symbol: ').upper()\n start()\n \n\n\n\nclass suppress_stdout_stderr(object):\n '''\n A context manager for doing a \"deep suppression\" of stdout and stderr in\n Python, i.e. 
will suppress all print, even if the print originates in a\n compiled C/Fortran sub-function.\n This will not suppress raised exceptions, since exceptions are printed\n to stderr just before a script exits, and after the context manager has\n exited (at least, I think that is why it lets exceptions through).\n\n '''\n def __init__(self):\n # Open a pair of null files\n self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n # Save the actual stdout (1) and stderr (2) file descriptors.\n self.save_fds = [os.dup(1), os.dup(2)]\n\n def __enter__(self):\n # Assign the null pointers to stdout and stderr.\n os.dup2(self.null_fds[0], 1)\n os.dup2(self.null_fds[1], 2)\n\n def __exit__(self, *_):\n # Re-assign the real stdout/stderr back to (1) and (2)\n os.dup2(self.save_fds[0], 1)\n os.dup2(self.save_fds[1], 2)\n # Close the null files\n for fd in self.null_fds + self.save_fds:\n os.close(fd)\n\n\n"
] | [
[
"pandas.DataFrame"
]
] |
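The trading script above gates buy/sell decisions on a normalized trend slope (average per-step price change divided by the mid price) combined with a lean of recent prices around the current quote and a Prophet forecast. A small self-contained sketch of the slope and lean computations on a synthetic price series follows; the thresholds in the final line are illustrative, not the bot's tuned gate values.

```python
import numpy as np

def trend_slope(prices, mid_price):
    """Mean per-step price change, normalised by the current mid price."""
    diffs = np.diff(prices)
    return diffs.sum() / mid_price / len(prices)

def lean(prices, mid_price):
    """Fraction of recent samples below (+) versus above (-) the current mid price."""
    below = np.sum(mid_price > prices)
    above = np.sum(mid_price < prices)
    return (below - above) / len(prices)

prices = 100 + np.cumsum(np.random.default_rng(1).normal(0, 0.05, size=240))
mid = prices[-1]
print("slope:", trend_slope(prices, mid))
print("lean :", lean(prices, mid))
# A buy gate might require both a rising slope and a positive lean (thresholds hypothetical):
print("buy?", trend_slope(prices, mid) > 0.0001 and lean(prices, mid) > 0.1)
```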
henrysky/astroNN | [
"a8358137f09bf964ec34faa2a19d2efc1d7c3557"
] | [
"tests/test_apogee_tools.py"
] | [
"import unittest\n\nimport numpy as np\nimport numpy.testing as npt\nfrom astroNN.apogee import (\n gap_delete,\n apogee_default_dr,\n bitmask_decompositor,\n chips_split,\n bitmask_boolean,\n apogee_continuum,\n aspcap_mask,\n combined_spectra,\n visit_spectra,\n)\nfrom astroNN.apogee.apogee_shared import apogeeid_digit\n\n\nclass ApogeeToolsCase(unittest.TestCase):\n def test_apogee_tools(self):\n # Example data\n raw_spectra = np.ones((10, 8575))\n raw_spectrum = np.ones(8575)\n wrong_spectrum = np.ones(1024)\n\n gap_deleted = gap_delete(raw_spectra)\n self.assertEqual(gap_deleted.shape == (10, 7514), True)\n gap_deleted = gap_delete(raw_spectrum)\n self.assertEqual(gap_deleted.shape == (1, 7514), True)\n gap_deleted = gap_delete(raw_spectra, dr=12)\n self.assertEqual(gap_deleted.shape == (10, 7214), True)\n gap_deleted = gap_delete(raw_spectrum, dr=12)\n self.assertEqual(gap_deleted.shape == (1, 7214), True)\n self.assertRaises(EnvironmentError, gap_delete, wrong_spectrum)\n\n # check gaia default dr\n dr = apogee_default_dr()\n self.assertEqual(dr, 16)\n dr = apogee_default_dr(dr=3)\n self.assertEqual(dr, 3)\n\n # bitmask\n self.assertEqual(bitmask_decompositor(0), None)\n npt.assert_array_equal(bitmask_decompositor(1), [0])\n npt.assert_array_equal(bitmask_decompositor(3), [0, 1])\n npt.assert_array_equal(bitmask_boolean([0, 1, 2], [0]), [[False, True, False]])\n self.assertRaises(ValueError, bitmask_decompositor, -1)\n\n # chips_split\n blue, green, red = chips_split(raw_spectra)\n self.assertEqual(\n np.concatenate((blue, green, red), axis=1).shape == (10, 7514), True\n )\n blue, green, red = chips_split(raw_spectrum)\n self.assertEqual(\n np.concatenate((blue, green, red), axis=1).shape == (1, 7514), True\n )\n self.assertRaises(ValueError, chips_split, raw_spectra, dr=10)\n\n def test_apogee_continuum(self):\n raw_spectra = np.ones((10, 8575)) * 2\n raw_spectra_err = np.zeros((10, 8575))\n # continuum\n cont_spectra, cont_spectra_arr = apogee_continuum(raw_spectra, raw_spectra_err)\n self.assertAlmostEqual(float(np.mean(cont_spectra)), 1.0)\n\n def test_apogee_digit_extractor(self):\n # Test apogeeid digit extractor\n # just to make no error\n apogeeid_digit([\"2M00380508+5608579\", \"2M00380508+5608579\"])\n apogeeid_digit(np.array([\"2M00380508+5608579\", \"2M00380508+5608579\"]))\n\n # check accuracy\n self.assertEqual(apogeeid_digit(\"2M00380508+5608579\"), \"2003805085608579\")\n npt.assert_array_equal(\n apogeeid_digit(np.array([\"2M00380508+5608579\", \"2M00380508+5608579\"])),\n [\"2003805085608579\", \"2003805085608579\"],\n )\n\n def test_aspcap_mask(self):\n self.assertEqual(np.all(aspcap_mask(\"C1\") == aspcap_mask(\"ci\")), True)\n self.assertEqual(np.all(aspcap_mask(\"TIII\") == aspcap_mask(\"ti2\")), True)\n # assert for example dr=1 is not supported\n self.assertRaises(ValueError, aspcap_mask, \"al\", 1)\n # Make sure if element not found, the case is nicely handled\n self.assertEqual(aspcap_mask(\"abc\"), None)\n\n\nclass ApogeeDownloaderCase(unittest.TestCase):\n def test_apogee_combined_download(self):\n \"\"\"\n Test APOGEE combined spectra downloading function, assert functions can deal with missing files\n \"\"\"\n # make sure the download works correctly\n combined_spectra(dr=13, location=4405, apogee=\"2M19060637+4717296\")\n combined_spectra(dr=14, location=4405, apogee=\"2M19060637+4717296\")\n combined_spectra(\n dr=16, field=\"K06_078+16\", telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n combined_spectra(\n dr=17, field=\"K06_078+16\", 
telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n # assert False is returning if file not found\n self.assertEqual(\n combined_spectra(dr=13, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n combined_spectra(dr=14, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n combined_spectra(\n dr=16,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n self.assertEqual(\n combined_spectra(\n dr=17,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n # assert error if DR not supported\n self.assertRaises(\n ValueError,\n combined_spectra,\n dr=1,\n location=4406,\n apogee=\"2M19060637+4717296\",\n )\n\n def test_apogee_visit_download(self):\n \"\"\"\n Test APOGEE visits spectra downloading function, assert functions can deal with missing files\n \"\"\"\n # make sure the download works correctly\n visit_spectra(dr=13, location=4405, apogee=\"2M19060637+4717296\")\n visit_spectra(dr=14, location=4405, apogee=\"2M19060637+4717296\")\n visit_spectra(\n dr=16, field=\"K06_078+16\", telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n visit_spectra(\n dr=17, field=\"K06_078+16\", telescope=\"apo25m\", apogee=\"2M19060637+4717296\"\n )\n # assert False is returning if file not found\n self.assertEqual(\n visit_spectra(dr=13, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n visit_spectra(dr=14, location=4406, apogee=\"2M19060637+4717296\"), False\n )\n self.assertEqual(\n visit_spectra(\n dr=16,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n self.assertEqual(\n visit_spectra(\n dr=17,\n field=\"K06_078+17\",\n telescope=\"apo25m\",\n apogee=\"2M19060637+4717296\",\n ),\n False,\n )\n # assert error if DR not supported\n self.assertRaises(\n ValueError, visit_spectra, dr=1, location=4406, apogee=\"2M19060637+4717296\"\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.mean"
]
] |
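The tests in the row above exercise bitmask helpers that decompose an APOGEE pixel-level bitmask integer into the indices of its set bits and turn arrays of masks into boolean flags. A standalone sketch of both operations against plain NumPy follows; the function names are placeholders, not astroNN's API.

```python
import numpy as np

def decompose_bitmask(value):
    """Return the indices of set bits in a non-negative bitmask integer, or None for 0."""
    if value < 0:
        raise ValueError("bitmask must be non-negative")
    if value == 0:
        return None
    return np.array([i for i in range(int(value).bit_length()) if (value >> i) & 1])

def bitmask_to_boolean(masks, target_bits):
    """True wherever a mask has any of the target bits set."""
    masks = np.asarray(masks)
    flag = np.zeros_like(masks, dtype=bool)
    for b in target_bits:
        flag |= ((masks >> b) & 1).astype(bool)   # test one bit position at a time
    return flag

print(decompose_bitmask(3))                            # [0 1]
print(bitmask_to_boolean([0, 1, 2], target_bits=[0]))  # [False  True False]
```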
QinJinghui/MSFFRN | [
"b55774c01374e6718cd1fd324d1fcaeb5002d00f"
] | [
"src/trainer.py"
] | [
"from decimal import Decimal\n\nimport torch\nimport torch.nn.utils as utils\nfrom tqdm import tqdm\n\nimport utility\n\n\nclass Trainer:\n def __init__(self, args, loader, my_model, my_loss, ckp):\n self.args = args\n self.scale = args.scale\n\n self.ckp = ckp\n self.loader_train = loader.loader_train\n self.loader_test = loader.loader_test\n self.model = my_model\n self.loss = my_loss\n self.optimizer = utility.make_optimizer(args, self.model)\n\n if self.args.load != '':\n self.optimizer.load(ckp.dir, epoch=len(ckp.log))\n \n self.error_last = 1e8\n\n def train(self):\n self.optimizer.schedule()\n self.loss.step()\n epoch = self.optimizer.get_last_epoch() + 1\n lr = self.optimizer.get_lr()\n\n self.ckp.write_log(\n \"[Epoch {}]\\tLearning rate: {:.2e}\".format(epoch, Decimal(lr))\n )\n self.loss.start_log()\n self.model.train()\n\n timer_data, timer_model = utility.timer(), utility.timer()\n for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):\n lr, hr = self.prepare(lr, hr)\n timer_data.hold()\n timer_model.tic()\n\n self.optimizer.zero_grad()\n sr = self.model(lr, idx_scale)\n loss = self.loss(sr, hr)\n loss.backward()\n if self.args.gclip > 0:\n utils.clip_grad_value_(self.model.parameters(), self.args.gclip)\n self.optimizer.step()\n\n timer_model.hold()\n\n if (batch + 1) % self.args.print_every == 0:\n self.ckp.write_log(\n \"[{}/{}]\\t{}\\t{:.1f}+{:.1f}s\".format(\n (batch + 1) * self.args.batch_size,\n len(self.loader_train.dataset),\n self.loss.display_loss(batch),\n timer_model.release(),\n timer_data.release(),\n )\n )\n\n timer_data.tic()\n\n self.loss.end_log(len(self.loader_train))\n self.error_last = self.loss.log[-1, -1]\n\n def test(self):\n torch.set_grad_enabled(False)\n\n epoch = self.optimizer.get_last_epoch() + 1\n self.ckp.write_log('\\nEvaluation:')\n self.ckp.add_log(\n torch.zeros(1, len(self.loader_test), len(self.scale))\n )\n self.model.eval()\n\n timer_test = utility.timer()\n if self.args.save_results:\n self.ckp.begin_background()\n for idx_data, d in enumerate(self.loader_test):\n for idx_scale, scale in enumerate(self.scale):\n d.dataset.set_scale(idx_scale)\n for lr, hr, filename, _ in tqdm(d, ncols=80):\n lr, hr = self.prepare(lr, hr)\n sr = self.model(lr, idx_scale)\n sr = utility.quantize(sr, self.args.rgb_range)\n\n save_list = [sr]\n self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(\n sr, hr, scale, self.args.rgb_range, dataset=d\n )\n if self.args.save_gt:\n save_list.extend([lr, hr])\n\n if self.args.save_results:\n self.ckp.save_results(d, filename[0], save_list, scale)\n\n self.ckp.log[-1, idx_data, idx_scale] /= len(d)\n best = self.ckp.log.max(0)\n self.ckp.write_log(\n \"[{} x{}]\\tPSNR: {:.3f} (Best: {:.3f} @epoch {})\".format(\n d.dataset.name,\n scale,\n self.ckp.log[-1, idx_data, idx_scale],\n best[0][idx_data, idx_scale],\n best[1][idx_data, idx_scale] + 1,\n )\n )\n\n self.ckp.write_log(\"Forward: {:.2f}s\\n\".format(timer_test.toc()))\n self.ckp.write_log(\"Saving...\")\n\n if self.args.save_results:\n self.ckp.end_background()\n\n if not self.args.test_only:\n self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))\n\n self.ckp.write_log(\"Total: {:.2f}s\\n\".format(timer_test.toc()), refresh=True)\n\n torch.set_grad_enabled(True)\n\n def prepare(self, *args):\n device = torch.device(\"cpu\" if self.args.cpu else \"cuda\")\n\n def _prepare(tensor):\n if self.args.precision == \"half\":\n tensor = tensor.half()\n return tensor.to(device)\n\n return [_prepare(a) for a in args]\n\n def 
terminate(self):\n if self.args.test_only:\n self.test()\n return True\n else:\n epoch = self.optimizer.get_last_epoch() + 1\n return epoch >= self.args.epochs\n"
] | [
[
"torch.device",
"torch.set_grad_enabled"
]
] |
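The trainer in the row above logs PSNR between the quantised super-resolved output and the ground-truth image via a utility call. Below is a minimal NumPy sketch of PSNR on uint8 images, without the border shaving and luminance conversion that the project's utility.calc_psnr presumably applies; the synthetic images are only for demonstration.

```python
import numpy as np

def psnr(sr, hr, rgb_range=255.0):
    """Peak signal-to-noise ratio in dB between two images of the same shape."""
    sr = sr.astype(np.float64)
    hr = hr.astype(np.float64)
    mse = np.mean((sr - hr) ** 2)
    if mse == 0:
        return float("inf")                       # identical images
    return 10.0 * np.log10(rgb_range ** 2 / mse)

rng = np.random.default_rng(0)
hr = rng.integers(0, 256, size=(32, 32, 3), dtype=np.uint8)                      # fake ground truth
noise = rng.integers(-3, 4, size=hr.shape)                                       # small perturbation
sr = np.clip(hr.astype(np.int16) + noise, 0, 255).astype(np.uint8)               # fake SR output
print(f"PSNR: {psnr(sr, hr):.2f} dB")
```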
OscarWang114/blueoil | [
"f1835b6f82b4c54725a0be0744708612399edb45"
] | [
"lmnet/lmnet/networks/optical_flow_estimation/data_augmentor.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2019 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\nimport math\nimport numpy as np\nfrom scipy import ndimage\n\nfrom abc import ABCMeta, abstractmethod\nfrom PIL import Image, ImageEnhance, ImageFilter\n\n\nclass Augmentor(metaclass=ABCMeta):\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, self.__dict__)\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n NotImplementedError()\n\n def split_input_tensor(self, input_tensor):\n \"\"\"\n input_tensor: np.ndarray with shape (H, W, 6)\n return: ndarray(H, W, 3), ndarray(H, W, 3)\n \"\"\"\n return input_tensor[..., :3], input_tensor[..., 3:]\n\n\nclass ColorConverter(Augmentor):\n \"\"\"\n Augmentors converting pixel color properties\n \"\"\"\n def __call__(self, image, *args, **kwargs):\n image_a, image_b = self.split_input_tensor(image)\n factor = np.random.uniform(self.min_value, self.max_value)\n processed_tensor = np.concatenate([\n self.process(image_a, factor),\n self.process(image_b, factor)\n ], axis=2)\n return dict({\"image\": processed_tensor}, **kwargs)\n\n def process(self, *args, **kwargs):\n NotImplementedError()\n\n\nclass Brightness(ColorConverter):\n \"\"\"\n Adjusting image brightness.\n reference:\n https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Brightness\n args: min_value, max_value:\n An enhancement factor of 0.0 gives a black image.\n A factor of 1.0 gives the original image.\n \"\"\"\n def __init__(self, min_value=0.75, max_value=1.25):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, factor):\n pil_image = Image.fromarray(image)\n enhancer = ImageEnhance.Brightness(pil_image)\n processed_image = enhancer.enhance(factor)\n return np.array(processed_image)\n\n\nclass Color(ColorConverter):\n \"\"\"\n Adjusting image color.\n reference:\n https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Color\n args: min_value, max_value\n An enhancement factor of 0.0 gives a black and white image.\n A factor of 1.0 gives the original image.\n \"\"\"\n def __init__(self, min_value=0.75, max_value=1.25):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, factor):\n pil_image = Image.fromarray(image)\n enhancer = ImageEnhance.Color(pil_image)\n processed_image = enhancer.enhance(factor)\n return np.array(processed_image)\n\n\nclass Contrast(ColorConverter):\n \"\"\"\n Adjusting image contrast.\n reference:\n https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html#PIL.ImageEnhance.PIL.ImageEnhance.Contrast\n args: min_value, max_value\n An enhancement factor of 0.0 gives a solid grey image.\n A factor of 
1.0 gives the original image.\n \"\"\"\n def __init__(self, min_value=0.75, max_value=1.25):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, factor):\n pil_image = Image.fromarray(image)\n enhancer = ImageEnhance.Contrast(pil_image)\n processed_image = enhancer.enhance(factor)\n return np.array(processed_image)\n\n\nclass Hue(ColorConverter):\n \"\"\"\n Adjusting image hue.\n args: min_value, max_value\n An enhancement factor of 0.0 gives a solid grey image.\n A factor of 1.0 gives the original image.\n \"\"\"\n def __init__(self, min_value=-10.0, max_value=10.0):\n assert min_value > -255 and max_value < 255, \\\n \"Value range should be within (-255, 255)!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, factor):\n pil_image = Image.fromarray(image)\n hsv_image = np.array(pil_image.convert(\"HSV\"))\n hsv_image[:, :, 0] = hsv_image[:, :, 0] + factor\n processed_image = Image.fromarray(hsv_image, \"HSV\").convert(\"RGB\")\n return np.array(processed_image)\n\n\nclass Gamma(ColorConverter):\n \"\"\"\n Gamma blur filter.\n \"\"\"\n def __init__(self, min_value=0.0, max_value=1.0):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, gamma):\n processed_image = (((image / 255.0) ** gamma) * 255.0).astype(np.uint8)\n return processed_image\n\n\nclass GaussianBlur(ColorConverter):\n \"\"\"\n Gaussian blur filter.\n reference:\n https://pillow.readthedocs.io/en/stable/reference/ImageFilter.html#PIL.ImageFilter.GaussianBlur\n args: min_value, max_value\n References default is 2.\n \"\"\"\n\n def __init__(self, min_value=0.0, max_value=1.0):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def process(self, image, radius):\n pil_image = Image.fromarray(image)\n processed_image = pil_image.filter(ImageFilter.GaussianBlur(radius))\n return np.array(processed_image)\n\n\nclass GaussianNoise(Augmentor):\n \"\"\"\n Additive Gaussian noise.\n \"\"\"\n def __init__(self, min_value=0.0, max_value=1.0):\n assert min_value >= 0 and max_value >= 0, \"Negative value not allowed!\"\n self.min_value, self.max_value = min_value, max_value\n\n def __call__(self, image, label, **kwargs):\n # print(image.shape)\n noise_amp = np.random.uniform(self.min_value, self.max_value)\n image_noise = noise_amp * np.random.randn(*image.shape)\n processed_image = image + image_noise\n processed_image[processed_image < 0] = 0\n processed_image[processed_image > 255] = 255\n processed_image = processed_image.astype(np.uint8)\n return dict({\n \"image\": processed_image, \"label\": label}, **kwargs)\n\n\nclass FlipTopBottom(Augmentor):\n \"\"\"\n Flip top bottom.\n args: probability\n Probability for flipping.\n \"\"\"\n def __init__(self, prob=0.5):\n self.prob = prob\n\n def __call__(self, image, label, **kwargs):\n if np.random.rand() < self.prob:\n image = image[::-1, ...]\n label = label[::-1, ...]\n label[:, :, 1] *= -1.0\n return dict({\n \"image\": image, \"label\": label}, **kwargs)\n\n\nclass FlipLeftRight(Augmentor):\n \"\"\"\n Flip left right.\n args: probability\n Probability for flipping.\n \"\"\"\n def __init__(self, prob=0.5):\n self.prob = prob\n\n def __call__(self, image, label, **kwargs):\n if np.random.rand() < self.prob:\n image = image[:, ::-1, :]\n label = label[:, 
::-1, :]\n label[:, :, 0] *= -1.0\n return dict({\n \"image\": image, \"label\": label}, **kwargs)\n\n\nclass Identity(Augmentor):\n \"\"\"\n create the pair of images with no change\n args: probability\n Probability for applying this process.\n \"\"\"\n def __init__(self, prob=0.1):\n self.prob = prob\n\n def __call__(self, image, label, **kwargs):\n if np.random.rand() < self.prob:\n image[..., :3] = image[..., 3:]\n label[:] = 0.0\n return dict({\n \"image\": image, \"label\": label}, **kwargs)\n\n\nclass Rotate(Augmentor):\n \"\"\"\n Rotating image\n \"\"\"\n def __init__(self, min_value=-15, max_value=15):\n self.min_value, self.max_value = min_value, max_value\n\n def __call__(self, image, label, **kwargs):\n ang = np.random.uniform(self.min_value, self.max_value)\n deg = ang * np.pi / 180\n rot_mat = np.array([\n [np.cos(deg), -np.sin(deg)],\n [np.sin(deg), np.cos(deg)],\n ])\n image_new = ndimage.rotate(image, ang, reshape=False, cval=0.0)\n image_new = image_new.astype(np.uint8)\n flow_new = np.array(label.dot(rot_mat.T))\n flow_new = ndimage.rotate(flow_new, ang, reshape=False, cval=0.0)\n return dict({\n \"image\": image_new, \"label\": flow_new}, **kwargs)\n\n\nclass Scale(Augmentor):\n \"\"\"\n Scaling image\n \"\"\"\n def __init__(self, min_value=1.0, max_value=2.0):\n assert min_value >= 1.0 or max_value >= 1.0, \\\n \"scaling parameter should be greater than 1.0\"\n self.min_value, self.max_value = min_value, max_value\n\n def random_crop(self, data, crop_size):\n height, width, _ = data.shape\n if height == crop_size[0] or width == crop_size[1]:\n return data\n top = np.random.randint(0, height - crop_size[0])\n left = np.random.randint(0, width - crop_size[1])\n bottom = top + crop_size[0]\n right = left + crop_size[1]\n return data[top:bottom, left:right, :]\n\n def __call__(self, image, label, **kwargs):\n image_size = image.shape[:2]\n factor = np.random.uniform(self.min_value, self.max_value)\n data = np.concatenate([image, label * factor], axis=2)\n zoomed_data = ndimage.zoom(data, [factor, factor, 1], order=1)\n data = self.random_crop(zoomed_data, crop_size=image_size)\n image_new = data[..., :-2]\n image_new[image_new < 0] = 0\n image_new[image_new > 255] = 255\n image_new = image_new.astype(np.uint8)\n label_new = data[..., -2:]\n return dict({\"image\": image_new, \"label\": label_new}, **kwargs)\n\n\nclass Translate(Augmentor):\n \"\"\"\n Shifting image\n \"\"\"\n def __init__(self, min_value=-0.2, max_value=0.2):\n self.min_value, self.max_value = min_value, max_value\n\n def __call__(self, image, label, **kwargs):\n image_size = image.shape[:2]\n dh = np.random.uniform(self.min_value, self.max_value)\n dw = np.random.uniform(self.min_value, self.max_value)\n shift = [int(image_size[0] * dh), int(image_size[1] * dw), 0]\n shifted_image = ndimage.shift(image, shift, order=1, cval=0)\n shifted_label = ndimage.shift(label, shift, order=1, cval=0)\n return dict({\"image\": shifted_image, \"label\": shifted_label}, **kwargs)\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.sin",
"numpy.random.rand",
"scipy.ndimage.rotate",
"scipy.ndimage.shift",
"numpy.random.randn",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.cos",
"scipy.ndimage.zoom"
]
] |
jpyne17/surname-origin | [
"39587995b6f6dd1beef47819f0b7a2a78a325a5b"
] | [
"src/utils.py"
] | [
"import glob\nimport os\nimport string\nimport unicodedata\nfrom collections import OrderedDict\nimport torch\n\n\nclass TextFileLoader:\n \"\"\"This class finds name text files in a given directory, and can then process them into a dict.\n\n Args:\n text_files_dir (str): Filepath to directory with names text files situated within.\n\n Attributes:\n filepaths (:list:`str`): Filepaths of .txt files.\n \"\"\"\n all_letters = string.ascii_letters + \" .,;'\"\n n_letters = len(all_letters)\n\n def __init__(self, text_files_dir):\n self.filepaths = glob.glob(os.path.join(text_files_dir, '*.txt'))\n\n @staticmethod\n def unicodeToAscii(unicode):\n return ''.join(\n c for c in unicodedata.normalize('NFD', unicode)\n if unicodedata.category(c) != 'Mn'\n and c in TextFileLoader.all_letters\n )\n\n @staticmethod\n def readLinesIntoList(filepath):\n lines = open(filepath, encoding='utf-8').read().strip().split('\\n')\n return [TextFileLoader.unicodeToAscii(line) for line in lines]\n\n def createDict(self):\n names_dict = OrderedDict()\n for filename in self.filepaths:\n category = filename.split('/')[-1].split('.')[0]\n lines = TextFileLoader.readLinesIntoList(filename)\n names_dict[category] = lines\n return names_dict\n\ndef _letter_to_index(letter):\n \"\"\"This function takes a letter and returns an index corresponding to TextFileLoader.all_letters.\n\n Args:\n letter (str): Single character string, length one. Must be ASCII.\n\n Returns:\n int: Index corresponding to TextFileLoader.all_letters.\n\n Raises:\n ValueError: If `letter` is a string of other than length one.\n TypeError: If `letter` is not `str` type.\n \"\"\"\n if type(letter) != str:\n raise TypeError('letter must be a string')\n if len(letter) != 1:\n raise ValueError('letter must be a string of length one')\n\n return TextFileLoader.all_letters.find(letter)\n\ndef _letter_to_tensor(letter):\n index = _letter_to_index(letter)\n\n tensor = torch.zeros(1, TextFileLoader.n_letters)\n tensor[0][index] = 1\n\n return tensor\n\ndef word_to_tensor(line):\n tensor = torch.zeros(len(line), 1, TextFileLoader.n_letters)\n for i, letter in enumerate(line):\n tensor[i][0][_letter_to_index(letter)] = 1\n return tensor"
] | [
[
"torch.zeros"
]
] |
sharif-42/All_About_DS | [
"c8fa254bb27943ff23f15c9f4175f51a596f30d0"
] | [
"Libraries/Matplotlib/graph_ploting.py"
] | [
"\"\"\"\nhttps://towardsdatascience.com/introduction-to-matplotlib-in-python-5f5a9919991f\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n# Draw Line Graph\nx_values = [0, 1, 2, 3, 4, 5, 6, 7, 8]\nsquares = [0, 1, 4, 9, 16, 25, 36, 49, 64]\nsquares_ = [10, 15, 14, 19, 17, 25, 36, 49, 74]\n\nplt.plot(x_values, squares)\nplt.title = \"Line Drawing\"\nplt.show()\n\n# Bar graphs\nplt.bar(x_values, squares, color=\"green\")\nplt.show()\n\n# Bar Graph Horizontal\nplt.barh(x_values, squares, color=\"green\")\nplt.show()\n\n# Scatter Plots\nplt.scatter(x_values, squares, s=30, color=\"red\")\nplt.scatter(x_values, squares_, s=30, marker='*', color=\"green\")\nplt.show()\n\n# Histogram\nplt.hist(squares_, bins=15, color='blue', alpha=0.5)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.bar"
]
] |
colin-zhou/fast-ta | [
"0862dc4ea230c3645d26803fde908a3e7b1733da"
] | [
"tests/tests.py"
] | [
"import numpy as np\nimport csv\nimport ta\nimport pandas\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport fast_ta\nimport argparse\n\n# ARGPARSE CODE\nparser = argparse.ArgumentParser(description='Fast-TA lib testing tool.')\nparser.add_argument('--show-plots', dest='show_plots', action='store_const',\n const=True, default=False,\n help='display interactive matplotlib plots')\nparser.add_argument('--save-plots', dest='save_plots', action='store_const',\n const=True, default=False,\n help='save matplotlib plots')\nparser.add_argument('--use-large-dataset', dest='large_dataset', action='store_const',\n const=True, default=False,\n help='use a larger dataset for testing')\nargs = parser.parse_args()\n\nif not args.save_plots and not args.show_plots:\n parser.print_help()\n exit()\n#\nif not args.large_dataset:\n with open('tests/AAPL_small.csv', 'r') as dataset:\n csv_data = list(csv.reader(dataset))[1:]\n\n close_data = []\n high_data = []\n low_data = []\n open_data = []\n volume_data = []\n\n for row in csv_data:\n close_data.append(row[4])\n high_data.append(row[2])\n low_data.append(row[3])\n open_data.append(row[1])\n volume_data.append(row[6])\nelse:\n with open('tests/AAPL.csv', 'r') as dataset:\n csv_data = list(csv.reader(dataset))[1:]\n\n close_data = []\n high_data = []\n low_data = []\n open_data = []\n volume_data = []\n\n for row in csv_data:\n close_data.append(row[1])\n high_data.append(row[2])\n low_data.append(row[3])\n open_data.append(row[4])\n volume_data.append(row[6])\n\n while 'null' in close_data: close_data.remove('null')\n while 'null' in high_data: high_data.remove('null')\n while 'null' in low_data: low_data.remove('null')\n while 'null' in open_data: open_data.remove('null')\n\nclose_data = np.array(close_data, dtype=np.float64)\nhigh_data = np.array(high_data, dtype=np.float64)\nlow_data = np.array(low_data, dtype=np.float64)\nopen_data = np.array(open_data, dtype=np.float64)\nvolume_data = np.array(volume_data, dtype=np.float64)\n\n\ndef rsi():\n plt.clf()\n plt.title(\"RSI \"+str(close_data.dtype))\n plt.plot(fast_ta.momentum.RSI(close=close_data, n = 14))\n plt.plot(ta.momentum.RSIIndicator(pandas.Series(close_data), window=14).rsi())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/RSI \" + str(close_data.dtype) + \".svg\")\n \ndef ao():\n plt.clf()\n plt.title(\"AO \"+str(high_data.dtype))\n plt.plot(ta.momentum.AwesomeOscillatorIndicator(pandas.Series(high_data), pandas.Series(low_data)).awesome_oscillator())\n plt.plot(fast_ta.momentum.AO(high=high_data, low=low_data, s = 5, l = 34))\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/AO \" + str(high_data.dtype) + \".svg\")\n \ndef kama():\n plt.clf()\n plt.title(\"KAMA \"+str(close_data.dtype))\n plt.plot(fast_ta.momentum.KAMA(close=close_data, n = 10, f = 2, s = 30))\n plt.plot(list(ta.momentum.KAMAIndicator(pandas.Series(close_data)).kama()))\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/KAMA \" + str(close_data.dtype) + \".svg\")\n\ndef roc():\n plt.clf()\n plt.title(\"ROC \"+str(close_data.dtype))\n plt.plot(fast_ta.momentum.ROC(close = close_data, n = 12))\n plt.plot(list(ta.momentum.ROCIndicator(pandas.Series(close_data), window=12).roc()))\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/ROC \" + str(close_data.dtype) + \".svg\")\n\ndef stoch():\n plt.clf()\n plt.title(\"Stochastic 
Oscillator \"+str(close_data.dtype))\n so = fast_ta.momentum.StochasticOscillator(high = high_data, low = low_data, close = close_data, n = 14, d_n = 3)\n plt.plot(so[0])\n sot = ta.momentum.StochasticOscillator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data))\n plt.plot(sot.stoch())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/STOCH \" + str(close_data.dtype) + \".svg\")\n\n plt.clf()\n plt.title(\"Stochastic Oscillator Signal \"+str(close_data.dtype))\n plt.plot(so[1])\n plt.plot(sot.stoch_signal())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/STOCH SIGNAL \" + str(close_data.dtype) + \".svg\")\n\ndef tsi():\n plt.clf()\n plt.title(\"TSI \"+str(close_data.dtype))\n plt.plot(fast_ta.momentum.TSI(close=close_data, r = 25, s = 13))\n plt.plot(list(ta.momentum.TSIIndicator(pandas.Series(close_data)).tsi()))\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/TSI \" + str(close_data.dtype) + \".svg\")\n\ndef uo():\n plt.clf()\n plt.title(\"Ultimate Oscillator \"+str(close_data.dtype))\n so = fast_ta.momentum.UltimateOscillator(high=high_data, low=low_data, close=close_data, s = 7, m = 14, l = 28, ws = 4, wm = 2, wl = 1)\n plt.plot(so)\n sot = ta.momentum.UltimateOscillator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data))\n plt.plot(sot.ultimate_oscillator())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/UO \" + str(close_data.dtype) + \".svg\")\n\ndef wr():\n plt.clf()\n plt.title(\"Williams %R \"+str(close_data.dtype))\n so = fast_ta.momentum.WilliamsR(high=high_data, low=low_data, close=close_data, n=14)\n plt.plot(so)\n sot = ta.momentum.WilliamsRIndicator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data))\n plt.plot(sot.williams_r())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/WR \" + str(close_data.dtype) + \".svg\")\n\ndef adi():\n plt.clf()\n plt.title(\"Accumulation/Distribution Index (ADI) \"+str(close_data.dtype))\n so = fast_ta.volume.ADI(high=high_data, low=low_data, close=close_data, volume=volume_data)\n plt.plot(so)\n sot = ta.volume.AccDistIndexIndicator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.acc_dist_index())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/ADI \" + str(close_data.dtype) + \".svg\")\n \ndef cmf():\n plt.clf()\n plt.title(\"Chaikin Money Flow (CMF) \"+str(close_data.dtype))\n so = fast_ta.volume.CMF(high=high_data, low=low_data, close=close_data, volume=volume_data, n=20)\n plt.plot(so)\n sot = ta.volume.ChaikinMoneyFlowIndicator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.chaikin_money_flow())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/CMF \" + str(close_data.dtype) + \".svg\")\n\ndef emv():\n plt.clf()\n plt.title(\"Ease of movement (EoM, EMV) \"+str(close_data.dtype))\n so = fast_ta.volume.EMV(high = high_data, low = low_data, volume = volume_data, n = 14)\n plt.plot(so[0])\n sot = ta.volume.EaseOfMovementIndicator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(volume_data))\n plt.plot(sot.ease_of_movement())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/EMV \" + str(close_data.dtype) + 
\".svg\")\n\n plt.clf()\n plt.title(\"Ease of movement (EoM, EMV) Signal \"+str(close_data.dtype))\n plt.plot(so[1])\n plt.plot(sot.sma_ease_of_movement())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/EMV SIGNAL \" + str(close_data.dtype) + \".svg\")\n\ndef fi():\n plt.clf()\n plt.title(\"Force Index (FI) \"+str(close_data.dtype))\n so = fast_ta.volume.FI(close=close_data, volume=volume_data, n=13)\n plt.plot(so)\n sot = ta.volume.ForceIndexIndicator(pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.force_index())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/FI \" + str(close_data.dtype) + \".svg\")\n \ndef mfi():\n plt.clf()\n plt.title(\"Money Flow Index (MFI) \"+str(close_data.dtype))\n so = fast_ta.volume.MFI(high=high_data, low=low_data, close=close_data, volume=volume_data, n=14)\n plt.plot(so)\n sot = ta.volume.MFIIndicator(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.money_flow_index())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/MFI \" + str(close_data.dtype) + \".svg\")\n\ndef nvi():\n plt.clf()\n plt.title(\"Negative Volume Index (NVI) \"+str(close_data.dtype))\n so = fast_ta.volume.NVI(close=close_data, volume=volume_data)\n plt.plot(so)\n sot = ta.volume.NegativeVolumeIndexIndicator(pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.negative_volume_index())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/NVI \" + str(close_data.dtype) + \".svg\")\n \ndef obv():\n plt.clf()\n plt.title(\"On-balance volume (OBV) \"+str(close_data.dtype))\n so = fast_ta.volume.OBV(close=close_data, volume=volume_data)\n plt.plot(so)\n sot = ta.volume.OnBalanceVolumeIndicator(pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.on_balance_volume())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/OBV \" + str(close_data.dtype) + \".svg\")\n \ndef vpt():\n # DIFF, believe TA's implementation is broken\n plt.clf()\n plt.title(\"Volume-price trend (VPT) \"+str(close_data.dtype))\n so = fast_ta.volume.VPT(close=close_data, volume=volume_data)\n plt.plot(so)\n sot = ta.volume.VolumePriceTrendIndicator(pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.volume_price_trend())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/VPT \" + str(close_data.dtype) + \".svg\")\n \ndef vwap():\n plt.clf()\n plt.title(\"Volume Weighted Average Price (VWAP) \"+str(close_data.dtype))\n so = fast_ta.volume.VWAP(high=high_data, low=low_data, close=close_data, volume=volume_data, n=14)\n plt.plot(so)\n sot = ta.volume.VolumeWeightedAveragePrice(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data), pandas.Series(volume_data))\n plt.plot(sot.volume_weighted_average_price())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/VWAP \" + str(close_data.dtype) + \".svg\")\n \ndef atr():\n # slight diff vs TA, they zero out the first n values\n plt.clf()\n plt.title(\"Average True Range (ATR) \"+str(close_data.dtype))\n so = fast_ta.volatility.ATR(high=high_data, low=low_data, close=close_data, n=14)\n plt.plot(so)\n sot = ta.volatility.AverageTrueRange(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data))\n plt.plot(sot.average_true_range())\n if args.show_plots:\n 
plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/ATR \" + str(close_data.dtype) + \".svg\")\n\ndef bol():\n plt.clf()\n plt.title(\"Bollinger Bands hband \"+str(close_data.dtype))\n so = fast_ta.volatility.BOL(close = close_data, n = 20, ndev = 2)\n plt.plot(so[0])\n sot = ta.volatility.BollingerBands(pandas.Series(close_data))\n plt.plot(sot.bollinger_hband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/BOL HBAND \" + str(close_data.dtype) + \".svg\")\n\n plt.clf()\n plt.title(\"Bollinger Bands mband \"+str(close_data.dtype))\n plt.plot(so[1])\n plt.plot(sot.bollinger_mavg())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/BOL MBAND \" + str(close_data.dtype) + \".svg\")\n \n plt.clf()\n plt.title(\"Bollinger Bands lband \"+str(close_data.dtype))\n plt.plot(so[2])\n plt.plot(sot.bollinger_lband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/BOL LBAND \" + str(close_data.dtype) + \".svg\")\n\ndef dc():\n # TA API modifications ***\n # artifacts in computation? otherwise it lines up.\n plt.clf()\n plt.title(\"Donchian Channel hband \"+str(close_data.dtype))\n so = fast_ta.volatility.DC(high = high_data, low = low_data, n = 20)\n plt.plot(so[0])\n sot = ta.volatility.DonchianChannel(high = pandas.Series(high_data), low = pandas.Series(low_data), close = pandas.Series(close_data), window = 20)\n plt.plot(sot.donchian_channel_hband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/DC HBAND \" + str(close_data.dtype) + \".svg\")\n \n plt.clf()\n plt.title(\"Donchian Channel lband \"+str(close_data.dtype))\n plt.plot(so[2])\n plt.plot(sot.donchian_channel_lband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/DC LBAND \" + str(close_data.dtype) + \".svg\")\n\ndef kc():\n # DIFF\n plt.clf()\n plt.title(\"Keltner Channel hband \"+str(close_data.dtype))\n so = fast_ta.volatility.KC(high=high_data, low=low_data, close=close_data, n1=20, n2=10, num_channels=1)\n plt.plot(so[2])\n sot = ta.volatility.KeltnerChannel(pandas.Series(high_data), pandas.Series(low_data), pandas.Series(close_data), original_version=False)\n plt.plot(sot.keltner_channel_hband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/KC HBAND \" + str(close_data.dtype) + \".svg\")\n\n plt.clf()\n plt.title(\"Keltner Channel mband \"+str(close_data.dtype))\n plt.plot(so[1])\n plt.plot(sot.keltner_channel_mband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/KC MBAND \" + str(close_data.dtype) + \".svg\")\n \n plt.clf()\n plt.title(\"Keltner Channel lband \"+str(close_data.dtype))\n plt.plot(so[0])\n plt.plot(sot.keltner_channel_lband())\n if args.show_plots:\n plt.show()\n if args.save_plots:\n plt.savefig(\"tests/plots/KC LBAND \" + str(close_data.dtype) + \".svg\")\n \ndef run_tests():\n rsi()\n ao()\n kama()\n roc()\n stoch()\n tsi()\n uo()\n wr()\n adi()\n cmf()\n emv()\n fi()\n mfi()\n nvi()\n obv()\n vpt()\n vwap()\n atr()\n bol()\n dc()\n kc()\n\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\nrun_tests()\nclose_data = np.array(close_data, dtype=np.float32)\nhigh_data = np.array(high_data, dtype=np.float32)\nlow_data = np.array(low_data, dtype=np.float32)\nopen_data = np.array(open_data, dtype=np.float32)\nvolume_data = np.array(volume_data, dtype=np.float32)\nrun_tests()\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"pandas.Series",
"matplotlib.pyplot.clf"
]
] |
lzmch/framequery | [
"48ed133dec90a9d2b8f4bef4b40f3a1909a0457f"
] | [
"tests/test__postgres_conformance.py"
] | [
"\"\"\"Tests to ensure that framequery and postgres understand the same SQL\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport decimal\n\nimport dask.dataframe as dd\nimport pandas as pd\nimport pandas.util.testing as pdt\n\nimport framequery as fq\nimport framequery.util as util\n\n\nimport pytest\n\ndata = {\n 'test': dict(\n columns=['c1', 'c2'],\n types=['int', 'int'],\n data=[\n (0, 1),\n (1, 2),\n (0, 3),\n (1, 4),\n (0, 5),\n (1, 6),\n ],\n ),\n 'other': dict(\n columns=['c3', 'c4'],\n types=['int', 'int'],\n data=[\n (0, 7),\n (1, 8),\n (0, 9),\n (1, 0),\n (0, 1),\n (1, 2),\n ],\n ),\n 'names': dict(\n columns=['name', 'id'],\n types=['text', 'int'],\n data=[\n ('foo', 0),\n ('bar', 1),\n ('baz', 2),\n ],\n ),\n 'ages': dict(\n columns=['age', 'id'],\n types=['int', 'int'],\n data=[\n (20, 0),\n (30, 1),\n (40, 2),\n ],\n ),\n}\n\n\[email protected](scope='module')\ndef setup(database):\n for name, desc in data.items():\n database.execute('drop table if exists %s' % name)\n\n coldef = ', '.join('%s %s' % (n, t) for n, t in zip(desc['columns'], desc['types']))\n database.execute('create table %s(%s)' % (name, coldef))\n\n cols = ', '.join(desc['columns'])\n database.execute('insert into %s (%s) values(%%s, %%s)' % (name, cols), *desc['data'])\n\n scope = {\n name: pd.DataFrame(desc['data'], columns=desc['columns'])\n for name, desc in data.items()\n }\n\n return database, scope\n\n\nexamples = [\n 'select c1, count(1) as cnt, sum(c2) from test group by c1',\n 'select c1, count(1) as cnt, sum(c2) from test group by 1',\n 'select c1 as a, c2 as b, c1 + c2 from test',\n 'select c1 as a, c2 as b, c1 + c2 from test where c1 = 0',\n '''\n with temp as (\n select\n c1 as a,\n c2 as b\n from test\n )\n\n select a + b from temp\n ''',\n 'select test.* from test',\n 'select count(*) from test group by 1 = 1',\n 'select count(*) from test',\n 'select test.c1, 2 * test.c2 from test',\n '''\n select\n c1, count(1) as cnt, sum(c2)\n from (\n select c1, 2 * c2 as c2\n from test\n ) sq\n group by c1\n ''',\n '''-- simple join\n select c2, c4\n from test\n join other\n on c1 = c3\n ''',\n '''-- joins as filters (left filter)\n select c2, c4\n from test\n join other\n on c1 = c3 and c1 = 0\n ''',\n '''-- joins as filters (right filter)\n select c2, c4\n from test\n join other\n on c1 = c3 and c3 = 0\n ''',\n '''-- join with transforms (left transform)\n select c2, c4\n from test\n join other\n on (c1 + 1) % 2 = c3\n ''',\n '''-- join with transforms (right transform)\n select c2, c4\n from test\n join other\n on c1 = (c3 + 1) % 2\n ''',\n '''-- cross-join with filter\n select c2, c4\n from test, other\n where c1 = (c3 + 1) % 2\n ''',\n '''-- join with inequality\n select c2, c4\n from test\n join other\n on c1 <= c3\n ''',\n '''\n select *\n from test\n left join other\n on c1 < c3\n ''',\n '''\n select *\n from other\n right join test\n on c1 < c3\n ''',\n '''\n select c2, c4\n from test\n left join other\n on c1 = (c3 + 1) % 2\n ''',\n '''\n select sum(c2), avg(c4)\n from test\n join other\n on c1 = c3\n group by c1\n ''',\n 'select \"c1\", \"test\".\"c2\" from test',\n\n # test case sensitivity\n r'''select 'Foo' like '%oo' ''',\n r'''select 'Foo' like '%OO' ''',\n r'''select upper('Foo') like '%OO' ''',\n r'''select 'Foo' like lower('%OO') ''',\n r'''select concat('foo', null, 'bar')''',\n\n r\"\"\"select * from json_each(cast('{\"foo\": \"bar\", \"hello\": \"world\"}' as json)) \"\"\",\n r\"\"\"select * from json_each('{\"foo\": \"bar\", \"hello\": \"world\"}' :: 
json)\"\"\",\n r\"\"\"\n select b.key\n from\n json_array_elements('[{\"foo\": \"bar\"}]' :: json),\n lateral json_each(value) as b\n order by key\n \"\"\",\n \"\"\"select trim(both 'xyz' from 'yxTomxx')\"\"\",\n \"\"\"select position('f' in 'foo'), position('b' in 'foo')\"\"\",\n \"\"\"select 'Post' || 'greSQL' \"\"\",\n\n \"\"\"select true and false, true or false, true and not false \"\"\",\n\n \"\"\"select 0 <> 1, 0 < 1, 1 > 2, 1 <= 2, 3 >= 4, 3 = 3, 4 != 4 \"\"\",\n\n '''\n select\n 2 + 3, 2 - 3, 2 * 3, 4 / 2,\n 5 % 4, 2.0 ^ 3.0, 91 & 15,\n 32 | 3, 17 # 5, ~1, 1 << 4,\n 8 >> 2\n ''',\n # '''-- not yet supported numeric operators\n # select\n # |/ 25.0, ||/ 27.0,\n # 5 !, !! 5, @ -5.0,\n # ''',\n 'select * from test limit 3',\n 'select * from test limit 3 offset 2',\n 'select * from test offset 3',\n\n 'select distinct c1 from test',\n 'select all c1 from test',\n '''\n SELECT c2,\n CASE WHEN c2 = 1 THEN 'one'\n WHEN c2 = 2 THEN 'two'\n ELSE 'other'\n END\n FROM test\n ''',\n '''\n SELECT c2,\n CASE c2\n WHEN 1 THEN 'one'\n WHEN 2 THEN 'two'\n ELSE 'other'\n END\n FROM test\n ''',\n '''select false and true or true''',\n '''select c1, c2 from test order by 1, c2''',\n '''select * from test as a, test as b''',\n\n # TODO: extend operator precedence test\n '''\n select\n (2.0 + 3.0) ^ 2.0 + (1.0 - 2.0) ^ 2.0,\n 2.0 * 3.0 ^ 2.0,\n 2.0 * 4.0 + 3.0\n ''',\n\n '''\n select names.id, avg(age)\n from names\n join ages\n on names.id = ages.id\n group by 1\n ''',\n]\n\ndask_xfail_examples = []\n\nxfail_examples = [\n '''\n select distinct on (c1)\n c1,\n c2,\n -c2 ^ c1 as c3\n from test\n order by c1, c2\n ''',\n '''\n -- order-by with a transformed column\n select *\n from test\n order by 4 * c1\n ''',\n]\n\nexamples = (\n [('pandas', q) for q in examples] +\n [pytest.mark.xfail()(('pandas', q)) for q in xfail_examples] +\n [('pandas', q) for q in dask_xfail_examples] +\n\n [('dask', q) for q in examples] +\n [pytest.mark.xfail()(('dask', q)) for q in xfail_examples] +\n [pytest.mark.xfail()(('dask', q)) for q in dask_xfail_examples]\n)\n\n\[email protected]('model, query', examples)\ndef test_select(setup, model, query):\n db, scope = setup\n\n if model == 'dask':\n scope = {k: dd.from_pandas(df, npartitions=3) for (k, df) in scope.items()}\n\n actual = fq.execute(query, scope, model=model)\n\n expected = _norm_result(db.execute(query.replace('%', '%%')).fetchall())\n actual = _norm_result(row for _, row in actual.iterrows())\n\n print('expected', expected, 'actual', actual, sep='\\n')\n pdt.assert_frame_equal(actual, expected, check_dtype=False, check_less_precise=True)\n\n\ndef _norm_result(iterable):\n return (\n pd.DataFrame(\n list(_norm_value(v) for v in row)\n for row in iterable\n )\n .pipe(lambda df: df.sort_values(list(df.columns)))\n .reset_index(drop=True)\n )\n\n\ndef _norm_value(v):\n if isinstance(v, decimal.Decimal):\n return float(v)\n return v\n\n\[email protected]('val', [\n 'foo',\n \"bar'baz\",\n 1, 4,\n -42.0,\n None, False, True,\n])\ndef test_escape_roundtrib(database, val):\n \"\"\"test the escape function\"\"\"\n assert database.execute('select ' + util.escape(val)).scalar() == val\n"
] | [
[
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
]
] |
osuzdalev/manim-1 | [
"adab2430645637a5e7e73832d3a6ff9e7d390159"
] | [
"manim/camera/mapping_camera.py"
] | [
"\"\"\"A camera that allows mapping between objects.\"\"\"\n\n__all__ = [\"MappingCamera\", \"OldMultiCamera\", \"SplitScreenCamera\"]\n\n\nimport numpy as np\n\nfrom ..camera.camera import Camera\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..utils.config_ops import DictAsObject\n\n# TODO: Add an attribute to mobjects under which they can specify that they should just\n# map their centers but remain otherwise undistorted (useful for labels, etc.)\n\n\nclass MappingCamera(Camera):\n \"\"\"Camera object that allows mapping\n between objects.\n \"\"\"\n\n def __init__(\n self,\n mapping_func=lambda p: p,\n min_num_curves=50,\n allow_object_intrusion=False,\n **kwargs\n ):\n self.mapping_func = mapping_func\n self.min_num_curves = min_num_curves\n self.allow_object_intrusion = allow_object_intrusion\n Camera.__init__(self, **kwargs)\n\n def points_to_pixel_coords(self, points):\n return Camera.points_to_pixel_coords(\n self,\n np.apply_along_axis(self.mapping_func, 1, points),\n )\n\n def capture_mobjects(self, mobjects, **kwargs):\n mobjects = self.get_mobjects_to_display(mobjects, **kwargs)\n if self.allow_object_intrusion:\n mobject_copies = mobjects\n else:\n mobject_copies = [mobject.copy() for mobject in mobjects]\n for mobject in mobject_copies:\n if (\n isinstance(mobject, VMobject)\n and 0 < mobject.get_num_curves() < self.min_num_curves\n ):\n mobject.insert_n_curves(self.min_num_curves)\n Camera.capture_mobjects(\n self,\n mobject_copies,\n include_submobjects=False,\n excluded_mobjects=None,\n )\n\n\n# Note: This allows layering of multiple cameras onto the same portion of the pixel array,\n# the later cameras overwriting the former\n#\n# TODO: Add optional separator borders between cameras (or perhaps peel this off into a\n# CameraPlusOverlay class)\n\n# TODO, the classes below should likely be deleted\nclass OldMultiCamera(Camera):\n def __init__(self, *cameras_with_start_positions, **kwargs):\n self.shifted_cameras = [\n DictAsObject(\n {\n \"camera\": camera_with_start_positions[0],\n \"start_x\": camera_with_start_positions[1][1],\n \"start_y\": camera_with_start_positions[1][0],\n \"end_x\": camera_with_start_positions[1][1]\n + camera_with_start_positions[0].pixel_width,\n \"end_y\": camera_with_start_positions[1][0]\n + camera_with_start_positions[0].pixel_height,\n },\n )\n for camera_with_start_positions in cameras_with_start_positions\n ]\n Camera.__init__(self, **kwargs)\n\n def capture_mobjects(self, mobjects, **kwargs):\n for shifted_camera in self.shifted_cameras:\n shifted_camera.camera.capture_mobjects(mobjects, **kwargs)\n\n self.pixel_array[\n shifted_camera.start_y : shifted_camera.end_y,\n shifted_camera.start_x : shifted_camera.end_x,\n ] = shifted_camera.camera.pixel_array\n\n def set_background(self, pixel_array, **kwargs):\n for shifted_camera in self.shifted_cameras:\n shifted_camera.camera.set_background(\n pixel_array[\n shifted_camera.start_y : shifted_camera.end_y,\n shifted_camera.start_x : shifted_camera.end_x,\n ],\n **kwargs,\n )\n\n def set_pixel_array(self, pixel_array, **kwargs):\n Camera.set_pixel_array(self, pixel_array, **kwargs)\n for shifted_camera in self.shifted_cameras:\n shifted_camera.camera.set_pixel_array(\n pixel_array[\n shifted_camera.start_y : shifted_camera.end_y,\n shifted_camera.start_x : shifted_camera.end_x,\n ],\n **kwargs,\n )\n\n def init_background(self):\n Camera.init_background(self)\n for shifted_camera in self.shifted_cameras:\n shifted_camera.camera.init_background()\n\n\n# A OldMultiCamera which, 
when called with two full-size cameras, initializes itself\n# as a split screen, also taking care to resize each individual camera within it\n\n\nclass SplitScreenCamera(OldMultiCamera):\n def __init__(self, left_camera, right_camera, **kwargs):\n Camera.__init__(self, **kwargs) # to set attributes such as pixel_width\n self.left_camera = left_camera\n self.right_camera = right_camera\n\n half_width = self.pixel_width / 2\n for camera in [self.left_camera, self.right_camera]:\n # TODO: Round up on one if width is odd\n camera.reset_pixel_shape(camera.pixel_height, half_width)\n\n OldMultiCamera.__init__(\n self,\n (left_camera, (0, 0)),\n (right_camera, (0, half_width)),\n )\n"
] | [
[
"numpy.apply_along_axis"
]
] |
skinnider/REINVENT | [
"6eec5cd18badad8cc5d73a096d11a3cfb753b17b"
] | [
"model.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils import Variable\n\nclass MultiGRU(nn.Module):\n \"\"\" Implements a three layer GRU cell including an embedding layer\n and an output linear layer back to the size of the vocabulary\"\"\"\n def __init__(self, voc_size):\n super(MultiGRU, self).__init__()\n self.embedding = nn.Embedding(voc_size, 128)\n self.gru_1 = nn.GRUCell(128, 512)\n self.gru_2 = nn.GRUCell(512, 512)\n self.gru_3 = nn.GRUCell(512, 512)\n self.linear = nn.Linear(512, voc_size)\n\n def forward(self, x, h):\n x = self.embedding(x)\n h_out = Variable(torch.zeros(h.size()))\n x = h_out[0] = self.gru_1(x, h[0])\n x = h_out[1] = self.gru_2(x, h[1])\n x = h_out[2] = self.gru_3(x, h[2])\n x = self.linear(x)\n return x, h_out\n\n def init_h(self, batch_size):\n # Initial cell state is zero\n return Variable(torch.zeros(3, batch_size, 512))\n\nclass RNN():\n \"\"\"Implements the Prior and Agent RNN. Needs a Vocabulary instance in\n order to determine size of the vocabulary and index of the END token\"\"\"\n def __init__(self, voc):\n self.rnn = MultiGRU(voc.vocab_size)\n if torch.cuda.is_available():\n self.rnn.cuda()\n self.voc = voc\n\n def likelihood(self, target):\n \"\"\"\n Retrieves the likelihood of a given sequence\n\n Args:\n target: (batch_size * sequence_lenght) A batch of sequences\n\n Outputs:\n log_probs : (batch_size) Log likelihood for each example*\n entropy: (batch_size) The entropies for the sequences. Not\n currently used.\n \"\"\"\n batch_size, seq_length = target.size()\n start_token = Variable(torch.zeros(batch_size, 1).long())\n start_token[:] = self.voc.vocab['GO']\n x = torch.cat((start_token, target[:, :-1]), 1)\n h = self.rnn.init_h(batch_size)\n\n log_probs = Variable(torch.zeros(batch_size))\n entropy = Variable(torch.zeros(batch_size))\n for step in range(seq_length):\n logits, h = self.rnn(x[:, step], h)\n log_prob = F.log_softmax(logits)\n prob = F.softmax(logits)\n log_probs += NLLLoss(log_prob, target[:, step])\n entropy += -torch.sum((log_prob * prob), 1)\n return log_probs, entropy\n\n def sample(self, batch_size, max_length=250, return_smiles=False,\n enable_grad=False):\n \"\"\"\n Sample a batch of sequences\n\n Args:\n batch_size : Number of sequences to sample \n max_length: Maximum length of the sequences\n\n Outputs:\n seqs: (batch_size, seq_length) The sampled sequences.\n log_probs : (batch_size) Log likelihood for each sequence.\n entropy: (batch_size) The entropies for the sequences. 
Not\n currently used.\n \"\"\"\n # turn on evaluation mode\n if not enable_grad:\n self.rnn.eval();\n with torch.set_grad_enabled(enable_grad):\n start_token = Variable(torch.zeros(batch_size).long())\n start_token[:] = self.voc.vocab['GO']\n h = self.rnn.init_h(batch_size)\n x = start_token\n \n sequences = []\n log_probs = Variable(torch.zeros(batch_size))\n finished = Variable(torch.zeros(batch_size).byte())\n entropy = Variable(torch.zeros(batch_size))\n \n for step in range(max_length):\n logits, h = self.rnn(x, h)\n prob = F.softmax(logits)\n log_prob = F.log_softmax(logits)\n x = torch.multinomial(prob, 1).view(-1)\n sequences.append(x.view(-1, 1))\n log_probs += NLLLoss(log_prob, x)\n entropy += -torch.sum((log_prob * prob), 1)\n \n x = Variable(x.data)\n EOS_sampled = (x == self.voc.vocab['EOS']).data\n finished = torch.ge(finished + EOS_sampled, 1)\n if torch.prod(finished) == 1: break\n \n # concatenate sequences and optionally decode\n sequences = torch.cat(sequences, 1)\n if return_smiles:\n sequences = [self.voc.decode(seq.cpu().numpy()) for \\\n seq in sequences]\n else:\n sequences = sequences.data\n \n # restore training mode\n if not enable_grad:\n self.rnn.train();\n # return\n return sequences, log_probs, entropy\n\ndef NLLLoss(inputs, targets):\n \"\"\"\n Custom Negative Log Likelihood loss that returns loss per example,\n rather than for the entire batch.\n\n Args:\n inputs : (batch_size, num_classes) *Log probabilities of each class*\n targets: (batch_size) *Target class index*\n\n Outputs:\n loss : (batch_size) *Loss for each example*\n \"\"\"\n\n if torch.cuda.is_available():\n target_expanded = torch.zeros(inputs.size()).cuda()\n else:\n target_expanded = torch.zeros(inputs.size())\n\n target_expanded.scatter_(1, targets.contiguous().view(-1, 1).data, 1.0)\n loss = Variable(target_expanded) * inputs\n loss = torch.sum(loss, 1)\n return loss\n"
] | [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.prod",
"torch.set_grad_enabled",
"torch.nn.functional.log_softmax",
"torch.multinomial",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.ge",
"torch.nn.Embedding",
"torch.nn.GRUCell",
"torch.sum"
]
] |
Kminassch/CoScal | [
"af936dd6881c26019e03aec7edacb278e46017cf"
] | [
"local_client/normal.py"
] | [
"# -*- coding: utf-8 -*-\r\nimport os\r\nfrom time import sleep\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# read data\r\ndataset = pd.read_csv('Alibaba_requests_up_5min_6core.csv')\r\n\r\nrequests_data = dataset['requests']\r\nrequests_data = requests_data.values\r\nstate_cur = []\r\n\r\n# pull requests via jmeter\r\nfor i in range(200, 300):\r\n k = requests_data[i]\r\n os.system(\"jmeter -n -t newplan.jmx -l normal/jmeter_rawdata_%s.csv -JthreadNum=%s\" % (i, k))\r\n jmeter = pd.read_csv('normal/jmeter_rawdata_%s.csv' % i)\r\n elapsed = np.array(jmeter['elapsed'])\r\n timeStamp = jmeter['timeStamp']\r\n success = np.array(jmeter['success'].astype('int'))\r\n latency = np.array(jmeter['Latency'])\r\n connect = np.array(jmeter['Connect'])\r\n lenth = len(timeStamp)\r\n timeStamp_len = timeStamp[lenth - 1] - timeStamp[0]\r\n # QoS: version 3\r\n state_cur_val = (np.mean(elapsed) * 0.05 + np.mean(latency) * 0.1 + np.mean(connect) * 0.05 + (\r\n 1 - np.mean(success)) * 2 + (timeStamp_len - 300000) / 300000) / 8\r\n if state_cur_val > 4:\r\n state_cur_val = 4\r\n state_cur.append(int(round(state_cur_val)))\r\n csv = pd.DataFrame(data=state_cur)\r\n names = ['state']\r\n csv.columns = names\r\n csv.to_csv('state_adjust.csv', mode='w')\r\n os.system(\"sudo pkill -9 java\")\r\n os.system(\"sudo pkill -9 jmeter\")\r\n sleep(15)\r\n"
] | [
[
"numpy.array",
"pandas.read_csv",
"numpy.mean",
"pandas.DataFrame"
]
] |
dudeperf3ct/end-to-end-images | [
"1dbd815d38795fc6d6f2d5f27d416abb1da5c63c"
] | [
"model.py"
] | [
"import copy\nimport torch\nimport torch.nn as nn\n\nimport timm\n\n\ndef set_parameter_requires_grad(\n model, feature_extracting: bool, num_ft_layers: int\n):\n \"\"\"\n Freeze the weights of the model is feature_extracting=True\n Fine tune layers >= num_ft_layers\n Batch Normalization: https://keras.io/guides/transfer_learning/\n\n Args:\n model: PyTorch model\n feature_extracting (bool): A bool to set all parameters to be trainable or not\n num_ft_layers (int): Number of layers to freeze and unfreezing the rest\n \"\"\"\n if feature_extracting:\n if num_ft_layers != -1:\n for i, module in enumerate(model.modules()):\n if i >= num_ft_layers:\n if not isinstance(module, nn.BatchNorm2d):\n module.requires_grad_(True)\n else:\n module.requires_grad_(False)\n else:\n for param in model.parameters():\n param.requires_grad = False\n # not recommended to set feature_extracting=True when use_pretrained=True\n else:\n for param in model.parameters():\n param.requires_grad = True\n\n\ndef _create_classifier(num_ftrs: int, embedding_size: int, num_classes: int):\n \"\"\"Add a classifier head with 2 FC layers\n\n Args:\n num_ftrs (int): Number of features from timm models\n embedding_size (int): Number of features in penultimate layer\n num_classes (int): Number of classes\n \"\"\"\n head = nn.Sequential(\n nn.Linear(num_ftrs, embedding_size),\n nn.Linear(embedding_size, num_classes),\n )\n return head\n\n\ndef build_models(\n model_name: str,\n num_classes: int,\n in_channels: int,\n embedding_size: int,\n feature_extract: bool = True,\n use_pretrained: bool = True,\n num_ft_layers: int = -1,\n bst_model_weights=None,\n):\n \"\"\"\n Build various architectures to either train from scratch, finetune or as feature extractor.\n\n Args:\n model_name (str) : Name of model from `timm.list_models(pretrained=use_pretrained)`\n num_classes (int) : Number of output classes added as final layer\n in_channels (int) : Number of input channels\n embedding_size (int): Size of intermediate features\n feature_extract (bool): Flag for feature extracting.\n False = finetune the whole model,\n True = only update the new added layers params\n use_pretrained (bool): Pretraining parameter to pass to the model or if base_model_path is given use that to\n initialize the model weights\n num_ft_layers (int) : Number of layers to finetune\n Default = -1 (do not finetune any layers)\n bst_model_weights : Best weights obtained after training pretrained model\n which will be used for further finetuning.\n\n Returns:\n model : A pytorch model\n \"\"\"\n supported_models = timm.list_models(pretrained=use_pretrained)\n model = None\n if model_name in supported_models:\n model = timm.create_model(\n model_name, pretrained=use_pretrained, in_chans=in_channels\n )\n set_parameter_requires_grad(model, feature_extract, num_ft_layers)\n # check if last layer in timm models is either classifier or fc\n try:\n num_ftrs = model.classifier.in_features\n model.classifier = _create_classifier(\n num_ftrs, embedding_size, num_classes\n )\n except AttributeError:\n num_ftrs = model.fc.in_features\n model.fc = _create_classifier(\n num_ftrs, embedding_size, num_classes\n )\n else:\n print(\"Invalid model name, exiting...\")\n exit()\n # load best model dict for further finetuning\n if bst_model_weights is not None:\n pretrain_model = torch.load(bst_model_weights)\n best_model_wts = copy.deepcopy(pretrain_model.state_dict())\n if feature_extract and num_ft_layers != -1:\n model.load_state_dict(best_model_wts)\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.load"
]
] |
cia05rf/async-scrape | [
"af0ecfd345ce8f34c07317b8681008e6caa114ad"
] | [
"async_scrape/libs/async_scrape.py"
] | [
"\nimport asyncio\nimport nest_asyncio\nimport aiohttp\nimport sys\nimport logging\nimport contextlib\nimport pandas as pd\nfrom time import sleep\n\nfrom aiohttp.client_exceptions import ServerDisconnectedError, ClientConnectionError\n\nfrom .base_scrape import BaseScrape\n\n\nclass AsyncScrape(BaseScrape):\n def __init__(self,\n post_process_func:callable,\n post_process_kwargs:dict={},\n fetch_error_handler:callable=None,\n use_proxy:bool=False,\n proxy:str=None,\n pac_url:str=None,\n consecutive_error_limit:int=100,\n attempt_limit:int=5,\n rest_between_attempts:bool=True,\n rest_wait:int=60\n ):\n \"\"\"Class for scrapping webpages\n \n args:\n ----\n post_process_func - callable - for processing html\n post_process_kwargs - dict:{} - kwargs for use in post processing\n fetch_error_handler - callable:None - the function to be called if an\n error is experienced during _fetch. Passes in:\n url, error as arguments\n use_proxy - bool:False - should a proxy be used\n proxy - str:None - what is the address of the proxy ONLY VALID IF\n PROXY IS TRUE\n pac_Url - str:None - the location of the pac information ONLY VALID IF\n PROXY IS TRUE\n consecutive_error_limit - int:100 - the number of times an error can be experienced \n in a row before the scrape is cancelled and a new round is started\n attempt_limit - int:5 - number of times a url can be attempted before it's abandoned\n rest_between_attempts - bool:True - should the program rest between scrapes\n rest_wait - int:60 - how long should the program rest for ONLY VALID IF\n REST_BETWEEN_SCRAPES IS TRUE\n \"\"\"\n #Init super\n super().__init__(\n use_proxy=use_proxy,\n proxy=proxy,\n pac_url=pac_url\n )\n self.post_process = post_process_func\n self.post_process_kwargs = post_process_kwargs\n self.headers = {}\n self.fetch_error_handler = fetch_error_handler\n self.shutdown_initiated = False\n #Establish loop and coro\n self.loop = None\n self.coro = None\n self.gathered_tasks = None\n #Define allowed errors\n self.acceptable_errors = (ServerDisconnectedError, ClientConnectionError)\n self.consecutive_error_limit = consecutive_error_limit\n self.consecutive_error_count = 0\n #Define criteria for looping multiple attempts\n self.attempt_limit = attempt_limit\n self.rest_between_attempts = rest_between_attempts\n self.rest_wait = rest_wait\n self.tracker_df = None\n self.cur_err = None\n \n async def shutdown(self):\n #Mark shutdown as started\n self.shutdown_initiated = True\n logging.info(\"Shutdown of scrape initialized ...\")\n self.gathered_tasks.cancel()\n\n def _proxy(self):\n #Set policy if using windows\n self._set_policy()\n #Start the pac session\n self._get_pac_session()\n\n def _set_policy(self):\n if sys.platform.startswith(\"win\") \\\n and sys.version_info[0] == 3 \\\n and sys.version_info[1] >= 8:\n policy = asyncio.WindowsSelectorEventLoopPolicy()\n asyncio.set_event_loop_policy(policy)\n\n def _get_event_loop(self):\n self.loop = asyncio.get_event_loop()\n if isinstance(self.loop, asyncio.BaseEventLoop):\n nest_asyncio.apply()\n return self.loop\n \n async def _fetch(self, session, url):\n \"\"\"Function to fetch HTML from url\n \n args:\n ----\n session - aiohttp.ClientSession() object\n url - str - url to be requested\n\n returns:\n ----\n list\n \"\"\"\n #Get the proxy for this url\n proxy = self._get_proxy(url)\n #Fetch with aiohttp session\n try:\n async with session.request(\"get\", url, proxy=proxy, headers=self.headers) as resp:\n if resp.status == 200:\n html = await resp.text()\n func_resp = 
self.post_process(html=html, resp=resp, **self.post_process_kwargs)\n else:\n func_resp = None\n #Reset self.acceptable_error_count if all goes fine\n self.consecutive_error_count = 0\n return {\"url\":url, \"func_resp\":func_resp, \"status\":resp.status, \"error\":None}\n except Exception as e:\n #Set the current error - increment if the same error\n if type(e) == self.cur_err:\n self.consecutive_error_count += 1\n else:\n self.cur_err = type(e)\n self.consecutive_error_count = 1\n #Check if acceptabe error limit has been reached\n # this prevents functions from carrying on after a site has started blocking calls\n if self.consecutive_error_count >= self.consecutive_error_limit \\\n and not self.shutdown_initiated:\n await self.shutdown()\n logging.warning(f\"Consecutive error limit reached - {e} - consecutive count at {self.consecutive_error_count}/{self.consecutive_error_limit}\")\n #Check for error handler\n if self.fetch_error_handler:\n logging.info(f\"Error passed to {self.fetch_error_handler.__name__}\")\n #Run the error handler\n self.fetch_error_handler(url, e)\n #Check if acceptable error\n if type(e) in self.acceptable_errors:\n logging.error(f\"Acceptable error in request or post processing {url} - {e}\")\n #Raise error\n else:\n logging.error(f\"Unhandled error in request or post processing {url} - {e}\")\n if f\"{e}\" == \"\":\n raise e\n return {\"url\":url, \"func_resp\":None, \"status\":None, \"error\":e}\n\n async def _fetch_async(self, \n session, url\n ):\n \"\"\"Function for getting routes from one locationt to another by different \n modes of transport.\n\n args:\n ----\n session - aiohttp.ClientSession() object\n url - str - url to be requested\n\n returns:\n ----\n float - time (hours)\n \"\"\"\n #Establish return object\n rtrn = {\n \"url\": url,\n \"func_resp\": None,\n \"status\": None\n }\n #Fetch\n rtrn = await self._fetch(session, url)\n #Increment the pages scraped\n self.increment_pages_scraped()\n return rtrn\n\n async def _fetch_all_async(self, urls):\n \"\"\"\"Async function for finding the latitude and \n longitude of a list of locations\n \n args:\n ----\n urls - list - the pages to be scraped\n\n returns:\n ----\n gathered asyncio tasks\n \"\"\"\n tasks = []\n async with aiohttp.ClientSession() as session:\n for url in urls:\n tasks.append(\n self._fetch_async(\n session,\n url\n )\n )\n self.gathered_tasks = asyncio.gather(*tasks)\n with contextlib.suppress(asyncio.CancelledError):\n _ = await self.gathered_tasks\n #Get results individually to deal with premature cancellation\n resps = [\n r.result() for r in self.gathered_tasks._children\n if not r.cancelled()\n ]\n return resps\n\n def _increment_attempts(self, scraped:bool, urls:list=None):\n if urls:\n filter_df = self.tracker_df.url.isin(urls)\n self.tracker_df.loc[filter_df, \"scraped\"] = scraped\n self.tracker_df.loc[filter_df, \"attempts\"] += 1\n else:\n self.tracker_df[\"scraped\"] = scraped\n self.tracker_df[\"attempts\"] += 1\n\n #run from terminal\n def scrape_all(self, urls:list=[]):\n \"\"\"\"Function asynchronously scraping html from urls and passing \n them through the post processing function\n \n args:\n ----\n urls - list - the pages to be scraped\n\n returns:\n ----\n list of dicts\n EG [{\n \"url\":\"http://google.com\",\n \"func_resp\":response from post process function,\n \"status\":200\n }]\n \"\"\"\n self.start_job()\n if self.use_proxy:\n self._proxy()\n #Establish urls\n if not len(urls):\n return []\n #Set a dataframe for tracking the url attempts\n self.tracker_df = 
pd.DataFrame([\n {\"url\":u, \"scraped\":False, \"attempts\":0}\n for u in urls\n ])\n resps = []\n i = 0\n scrape_urls = urls\n #Start the loop\n self.loop = self._get_event_loop()\n while len(scrape_urls):\n #Ensure shutdown flag is reset self.shutdown_initiated\n self.shutdown_initiated = False\n self.reset_pages_scraped()\n self.total_to_scrape = len(scrape_urls)\n #Gaher tasks and run\n self.coro = self._fetch_all_async(scrape_urls)\n #Build try except clause for premature cancellation\n scrape_resps = self.loop.run_until_complete(self.coro)\n #Add scrape_resps to resps\n resps.extend(scrape_resps)\n #Get scraped urls\n scraped_urls = [\n r[\"url\"] for r in resps\n if not r[\"error\"]\n ]\n #Increment attempts count on each scraped url\n self._increment_attempts(True, scrape_urls)\n #Get errored urls\n errored_urls = [\n r[\"url\"] for r in resps\n if r[\"error\"]\n ]\n #Increment attempts count on each attempted but failed (IE had an error \n # but not cancelled)\n self._increment_attempts(False, errored_urls)\n #Remove scraped urls from scrape_urls\n scrape_urls = [\n u for u in scrape_urls\n if u not in scraped_urls\n ]\n #Remove urls where too many attempts have been made\n failed_urls = self.tracker_df[\n self.tracker_df.attempts >= self.attempt_limit\n ].url.to_list()\n scrape_urls = [\n u for u in scrape_urls\n if u not in failed_urls\n ]\n logging.info(f\"Scraping round {i} complete, in this round - {len(scraped_urls)} urls scrapped, {len(errored_urls)} urls errored, {len(errored_urls) - len(scrape_urls)} urls failed, {len(scrape_urls)} remain unscrapped\")\n #Increment the loop number \n i += 1\n #Sleep before running again\n # - shutdown must have been initiated\n # - there must still be urls to scrape\n # - the rest between attempt flag must be set to True \n if self.shutdown_initiated \\\n and len(scrape_urls) \\\n and self.rest_between_attempts:\n logging.info(f\"Sleeping for {self.rest_wait} seconds\")\n sleep(self.rest_wait)\n logging.info(f\"Scraping complete {len(failed_urls)} urls failed\")\n #end the job\n self.end_job()\n return resps"
] | [
[
"pandas.DataFrame"
]
] |
idunnam/Thesis | [
"a567a25aa037c949de285158804a6ee396fc0e6c"
] | [
"plot_scripts/temperature_timeline_new.py"
] | [
"\"\"\"\nThis code is used to pick and visualize the twenty-year warming period of +4deg +/- 10years for each individual model. \n\"\"\"\n\nimport xarray as xr \nimport matplotlib.pyplot as plt \nimport numpy as np\nimport seaborn as sns\n\n#===================== PLOT rolling mean 4 plot timeline + STD =========================\nACCESS = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/ACCESS_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nHADGEM_cloud = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/HADGEM_anomaly_JJA_cloud.nc').mean(dim=['X10_105','Y21_199'])\nHADGEM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/HADGEM_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nCSIRO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CSIRO_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nIPSL = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/IPSL_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nMIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MIROC5_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nNORESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/NORESM_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\n\n#CMIP6 models\nCESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CESM_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nCNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_ESM2_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nCNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_CM6_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nMRI = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MRI_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\nUKMO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/UKMO_anomaly_JJA.nc').mean(dim=['X10_105','Y21_199'])\n\n\n#Spatial-rolling mean\nACCESS_time = ACCESS.rolling(year=20,center= True).mean()\nHADGEM_time = HADGEM.rolling(year=20,center= True).mean()\nHADGEM_cloud_time = HADGEM_cloud.rolling(year=20,center= True).mean()\nCSIRO_time = CSIRO.rolling(year=20,center= True).mean()\nIPSL_time = IPSL.rolling(year=20,center= True).mean()\nMIROC5_time = MIROC5.rolling(year=20,center= True).mean()\nNORESM_time = NORESM.rolling(year=20,center= True).mean()\n\nCESM_time = CESM.rolling(year=20,center= True).mean()\nCNRM_ESM2_time = CNRM_ESM2.rolling(year=20,center= True).mean()\nCNRM_CM6_time = CNRM_CM6.rolling(year=20,center= True).mean()\nMRI_time = MRI.rolling(year=20,center= True).mean()\nUKMO_time = UKMO.rolling(year=20,center= True).mean()\n\n\n#CMIP5 models\nACCESS_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/ACCESS_rol_4.nc').mean(dim='year')\nHADGEM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/HADGEM_rol_4.nc').mean(dim='year')\nHADGEM_cloud_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/HADGEM_cloud_rol_4.nc').mean(dim='year')\nCSIRO_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CSIRO_rol_4.nc').mean(dim='year')\nIPSL_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/IPSL_rol_4.nc').mean(dim='year')\nMIROC5_rol_4 = 
xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/MIROC5_rol_4.nc').mean(dim='year')\nNORESM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/NORESM_rol_4.nc').mean(dim='year')\n\n#CMIP6 models\nCESM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CESM_rol_4.nc').mean(dim='year')\nCNRM_ESM2_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CNRM_ESM2_rol_4.nc').mean(dim='year')\nCNRM_CM6_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/CNRM_CM6_rol_4.nc').mean(dim='year')\nMRI_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/MRI_rol_4.nc').mean(dim='year')\nUKMO_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/rol_mean_3_5_deg/UKMO_rol_4.nc').mean(dim='year')\n\n\n\n\nplt.rcParams.update({\n\"text.usetex\": True,\n\"font.family\": 'DejaVu Sans',\n\"font.serif\": [\"Computer Modern Roman\"],\n\"font.size\": 12,\n\"xtick.labelsize\": 12,\n\"ytick.labelsize\": 12})\n\npx = 1/plt.rcParams['figure.dpi']\n#fig, ax = plt.subplots(1,2, figsize=(20,10),sharey=True,gridspec_kw={'width_ratios': [2, 1]})\n#fig, ax = plt.subplots(2,1, figsize=(612*px,1012*px),sharex=True)\nfig, ax = plt.subplots(1,2, figsize=(612*px,306*px),sharey=True,gridspec_kw={'width_ratios': [2, 1]})\n\n\nax[0].plot(ACCESS_time.year,ACCESS_time.TT,label='ACCESS', linewidth=1.2)\nax[0].plot(HADGEM_time.year,HADGEM_time.TT, label='HADGEM', linewidth=1.2)\nax[0].plot(CSIRO_time.year,CSIRO_time.TT,label='CSIRO', linewidth=1.2)\nax[0].plot(IPSL_time.year,IPSL_time.TT,label='IPSL', linewidth=1.2)\nax[0].plot(MIROC5_time.year,MIROC5_time.TT,label='MIROC5', linewidth=1.2)\nax[0].plot(NORESM_time.year,NORESM_time.TT,label='NORESM', linewidth=1.2)\n\n####---###\n#\"\"\"\nax[1].boxplot([ACCESS_time.TT.sel(year=slice('2077','2091')),\n HADGEM_time.TT.sel(year=slice('2055','2075')),\n CSIRO_time.TT.sel(year=slice('2080','2091')),\n IPSL_time.TT.sel(year=slice('2055','2075')),\n MIROC5_time.TT.sel(year=slice('2059','2079')),\n NORESM_time.TT.sel(year=slice('2068','2088')),\n CESM_time.TT.sel(year=slice('2044','2064')),\n CNRM_ESM2_time.TT.sel(year=slice('2061','2081')),\n CNRM_CM6_time.TT.sel(year=slice('2059','2079')),\n MRI_time.TT.sel(year=slice('2065','2085')),\n UKMO_time.TT.sel(year=slice('2029','2049'))],widths=0.3)\n#secax = ax[0].secondary_yaxis(1.0)\n#secax.set_yticklabels(['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'])\nax[1].set_xticklabels(['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'], rotation=90, fontsize = 11)\n\nax[0].plot(CESM_time.year,CESM_time.TT,'--',label='CESM')\nax[0].plot(CNRM_ESM2_time.year,CNRM_ESM2_time.TT,'--',label='CNRM-ESM2')\nax[0].plot(CNRM_CM6_time.year,CNRM_CM6_time.TT,'--',label='CNRM-CM6')\nax[0].plot(MRI_time.year, MRI_time.TT,'--',label='MRI')\nax[0].plot(UKMO_time.year,UKMO_time.TT,'--',label='UKESM', color='navy')\nax[0].legend(frameon=False, ncol=2, fontsize=7)\n\nax[0].hlines(4,1950,2100, color='black', linewidth= 0.7)\n\nax[0].set_xlabel('Year', fontsize = 11)\nax[0].set_ylabel('Near-surface temperature anomalies [$^{\\circ}$C]', fontsize = 11)\n\n\"\"\"\n#ACCESS\n#ax[1].set_ylim(0,11,1)\nax[0].hlines(4,2077,2097, color='blue')\nax[0].scatter(2077,4, marker ='|', s=100, color='blue')\n#ax[0].scatter(2087,4, marker ='|', s=200, color='blue')\nax[0].scatter(2097,4, marker ='|', s=100, 
color='blue')\n\n#HADGEM\nax[0].hlines(4,2055,2075, color='orange')\nax[0].scatter(2055,4, marker ='|', s=100, color='orange')\n#ax[0].scatter(2075,4, marker ='|', s=200, color='orange')\nax[0].scatter(2065,4, marker ='|', s=100, color='orange')\n\n#CSIRO\nax[0].hlines(4,2080,2100, color='green')\nax[0].scatter(2080,4, marker ='|', s=100, color='green')\n#ax[0].scatter(2090,4, marker ='|', s=200, color='green')\nax[0].scatter(2100,4, marker ='|', s=100, color='green')\n\n#IPSL\nax[0].hlines(4,2055,2075, color='red')\nax[0].scatter(2055,4, marker ='|', s=100, color='red')\n#ax[0].scatter(2065,4, marker ='|', s=200, color='red')\nax[0].scatter(2075,4, marker ='|', s=100, color='red')\n\n#MIROC5\nax[0].hlines(4,2059,2079, color='purple')\nax[0].scatter(2059,4, marker ='|', s=100, color='purple')\n#ax[0].scatter(2069,4, marker ='|', s=200, color='purple')\nax[0].scatter(2079,4, marker ='|', s=100, color='purple')\n\n#NORESM\nax[0].hlines(4,2068,2088, color='brown')\nax[0].scatter(2068,4, marker ='|', s=100, color='brown')\n#ax[0].scatter(2078,4, marker ='|', s=200, color='brown')\nax[0].scatter(2088,4, marker ='|', s=100, color='brown')\n\n#MIROC5\nax[0].hlines(4,2044,2064, color='pink')\nax[0].scatter(2044,4, marker ='|', s=100, color='pink')\n#ax[0].scatter(2054,4, marker ='|', s=200, color='pink')\nax[0].scatter(2064,4, marker ='|', s=100, color='pink')\n\n#CNRM_ESM2\nax[0].hlines(4,2061,2081, color='gray')\nax[0].scatter(2061,4, marker ='|', s=100, color='gray')\n#ax[0].scatter(2071,4, marker ='|', s=200, color='gray')\nax[0].scatter(2081,4, marker ='|', s=100, color='gray')\n\n#CNRM_CM6\nax[0].hlines(4,2059,2079, color='olive')\nax[0].scatter(2059,4, marker ='|', s=100, color='olive')\n#ax[0].scatter(2069,4, marker ='|', s=200, color='olive')\nax[0].scatter(2079,4, marker ='|', s=100, color='olive')\n\n#MRI\nax[0].hlines(4,2065,2085, color='cyan')\nax[0].scatter(2065,4, marker ='|', s=100, color='cyan')\n#ax[0].scatter(2075,4, marker ='|', s=200, color='cyan')\nax[0].scatter(2085,4, marker ='|', s=100, color='cyan')\n\n#UKESM\nax[0].hlines(4,2029,2049, color='navy')\nax[0].scatter(2029,4, marker ='|', s=100, color='navy')\n#ax[0].scatter(2039,4, marker ='|', s=200, color='navy')\nax[0].scatter(2049,4, marker ='|', s=100, color='navy')\n\"\"\"\nsns.despine()\n\nplt.savefig('Figures/temp_timeline_variation.pdf', bbox_inches='tight',dpi=300)\n#\"\"\"\n###---####\n\n\"\"\"\n\nax[1].boxplot([ACCESS_time.year.sel(year=slice('2077','2091')),\n HADGEM_time.year.sel(year=slice('2055','2075')),\n CSIRO_time.year.sel(year=slice('2080','2091')),\n IPSL_time.year.sel(year=slice('2055','2075')),\n MIROC5_time.year.sel(year=slice('2059','2079')),\n NORESM_time.year.sel(year=slice('2068','2088')),\n CESM_time.year.sel(year=slice('2044','2064')),\n CNRM_ESM2_time.year.sel(year=slice('2061','2081')),\n CNRM_CM6_time.year.sel(year=slice('2059','2079')),\n MRI_time.year.sel(year=slice('2065','2085')),\n UKMO_time.year.sel(year=slice('2029','2049'))],xerr=10,vert=False,widths=0.3)\n\"\"\"\n\n\"\"\"\n\n#ACCESS\n\nax[1].set_ylim(0,11,1)\nax[1].hlines(11,2077,2097, color='blue')\nax[1].scatter(2077,11, marker ='|', s=100, color='blue')\nax[1].scatter(2087,11, marker ='|', s=200, color='blue')\nax[1].scatter(2097,11, marker ='|', s=100, color='blue')\n\n#HADGEM\nax[1].hlines(10,2055,2075, color='orange')\nax[1].scatter(2055,10, marker ='|', s=100, color='orange')\nax[1].scatter(2075,10, marker ='|', s=200, color='orange')\nax[1].scatter(2065,10, marker ='|', s=100, 
color='orange')\n\n#CSIRO\nax[1].hlines(9,2080,2100, color='green')\nax[1].scatter(2080,9, marker ='|', s=100, color='green')\nax[1].scatter(2090,9, marker ='|', s=200, color='green')\nax[1].scatter(2100,9, marker ='|', s=100, color='green')\n\n#IPSL\nax[1].hlines(8,2055,2075, color='red')\nax[1].scatter(2055,8, marker ='|', s=100, color='red')\nax[1].scatter(2065,8, marker ='|', s=200, color='red')\nax[1].scatter(2075,8, marker ='|', s=100, color='red')\n\n#MIROC5\nax[1].hlines(7,2059,2079, color='purple')\nax[1].scatter(2059,7, marker ='|', s=100, color='purple')\nax[1].scatter(2069,7, marker ='|', s=200, color='purple')\nax[1].scatter(2079,7, marker ='|', s=100, color='purple')\n\n#NORESM\nax[1].hlines(6,2068,2088, color='brown')\nax[1].scatter(2068,6, marker ='|', s=100, color='brown')\nax[1].scatter(2078,6, marker ='|', s=200, color='brown')\nax[1].scatter(2088,6, marker ='|', s=100, color='brown')\n\n#MIROC5\nax[1].hlines(5,2044,2064, color='pink')\nax[1].scatter(2044,5, marker ='|', s=100, color='pink')\nax[1].scatter(2054,5, marker ='|', s=200, color='pink')\nax[1].scatter(2064,5, marker ='|', s=100, color='pink')\n\n#CNRM_ESM2\nax[1].hlines(4,2061,2081, color='gray')\nax[1].scatter(2061,4, marker ='|', s=100, color='gray')\nax[1].scatter(2071,4, marker ='|', s=200, color='gray')\nax[1].scatter(2081,4, marker ='|', s=100, color='gray')\n\n#CNRM_CM6\nax[1].hlines(3,2059,2079, color='olive')\nax[1].scatter(2059,3, marker ='|', s=100, color='olive')\nax[1].scatter(2069,3, marker ='|', s=200, color='olive')\nax[1].scatter(2079,3, marker ='|', s=100, color='olive')\n\n#MRI\nax[1].hlines(2,2065,2085, color='cyan')\nax[1].scatter(2065,2, marker ='|', s=100, color='cyan')\nax[1].scatter(2075,2, marker ='|', s=200, color='cyan')\nax[1].scatter(2085,2, marker ='|', s=100, color='cyan')\n\n#UKESM\nax[1].hlines(1,2029,2049, color='navy')\nax[1].scatter(2029,1, marker ='|', s=100, color='navy')\nax[1].scatter(2039,1, marker ='|', s=200, color='navy')\nax[1].scatter(2049,1, marker ='|', s=100, color='navy')\n\n\nax[1].set_xlim(1950,2100)\nax[1].set_yticks([11,10,9,8,7,6,5,4,3,2,1])#,['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'])\nax[1].set_yticklabels(['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'])\n\n#mod = np.linspace(0,10,1)\n#year = np.linspace(2000,2100,1)\n#plt.plot(year,mod)\n#plt.hlines(1, 2020, 2040)\n#plt.scatter(2030,1)\n#plt.scatter(2020,1, marker ='|', s=200)\n#plt.scatter(2040,1, marker ='|', s=200)\n#plt.ylim()\n\n#secax = ax[0].secondary_yaxis(1.0)\n#secax.set_yticklabels(['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'])\n\n#ax[1].set_yticklabels(['ACCESS','HADGEM','CSIRO','IPSL','MIROC5','NORESM','CESM','CNRM-ESM2','CNRM-CM6','MRI','UKESM'], rotation=0, fontsize = 22)\n\nax[0].plot(CESM_time.year,CESM_time.TT,'--',label='CESM', linewidth=1.2)\nax[0].plot(CNRM_ESM2_time.year,CNRM_ESM2_time.TT,'--',label='CNRM-ESM2', linewidth=1.2)\nax[0].plot(CNRM_CM6_time.year,CNRM_CM6_time.TT,'--',label='CNRM-CM6', linewidth=1.2)\nax[0].plot(MRI_time.year, MRI_time.TT,'--',label='MRI', linewidth=1.2)\nax[0].plot(UKMO_time.year,UKMO_time.TT,'--',label='UKMO', linewidth=1.2, color='navy')\nax[0].legend(frameon=False, ncol=2)\nax[0].hlines(4,1950,2100, color='black', linewidth= 0.7)\nax[1].set_xlabel('Year', fontsize = 12)\nax[0].set_ylabel('Near-surface temperature anomalies [$^{\\circ}$C]', 
fontsize=14)\nsns.despine()\n\nplt.savefig('Figures/temp_timeline_rol_new.pdf', bbox_inches='tight',dpi=300)\n\n#\"\"\"\n\n\nprint('ACCSS:[',ACCESS_time.TT.sel(year=slice('2077','2091')).min(),ACCESS_time.TT.sel(year=slice('2077','2091')).max(),']',\n 'HADGEM:[',HADGEM_time.TT.sel(year=slice('2055','2075')).min(),HADGEM_time.TT.sel(year=slice('2055','2075')).max(),']',\n 'CSIRO:[',CSIRO_time.TT.sel(year=slice('2080','2091')).min(),CSIRO_time.TT.sel(year=slice('2080','2091')).max(),']',\n 'IPSL:[',IPSL_time.TT.sel(year=slice('2055','2075')).min(),IPSL_time.TT.sel(year=slice('2055','2075')).max(),']',\n 'MIROC5:[',MIROC5_time.TT.sel(year=slice('2059','2079')).min(),MIROC5_time.TT.sel(year=slice('2059','2079')).max(),']',\n 'NORESM:[',NORESM_time.TT.sel(year=slice('2068','2088')).min(),NORESM_time.TT.sel(year=slice('2068','2088')).max(),']',\n 'CESM:[',CESM_time.TT.sel(year=slice('2044','2064')).min(),CESM_time.TT.sel(year=slice('2044','2064')).max(),']',\n 'CNRM_ESM2:[',CNRM_ESM2_time.TT.sel(year=slice('2061','2081')).min(),CNRM_ESM2_time.TT.sel(year=slice('2061','2081')).max(),']',\n 'CNRM_CM6:[',CNRM_CM6_time.TT.sel(year=slice('2059','2079')).min(),CNRM_CM6_time.TT.sel(year=slice('2059','2079')).max(),']',\n 'MRI:[',MRI_time.TT.sel(year=slice('2065','2085')).min(),MRI_time.TT.sel(year=slice('2065','2085')).max(),']',\n 'UKESM:[',UKMO_time.TT.sel(year=slice('2029','2049')).min(),UKMO_time.TT.sel(year=slice('2029','2049')).max(),']')\n\n\"\"\"\n>> output from terminal:\nACCSS:[(3.47513016)(4.18731111) ] \nHADGEM:[(3.40786517)(4.47178334) ] \nCSIRO:[(3.35052158)(3.99524498) ] \nIPSL:[(3.40198087)(4.52394821) ] \nMIROC5:[(3.41014837)(4.75877675) ] \nNORESM:[(3.28768252)(4.71527829) ] \nCESM:[(3.42487348)(4.50217443) ] \nCNRM_ESM2:[(3.39792152)(4.89087548) ] <--- størst \nCNRM_CM6:[(3.31723318)(4.8508086) ] \nMRI:[(3.50770828)(4.64107507) ] \nUKESM:[(3.07230533)(4.66408786) ] <-- minst\n\"\"\""
] | [
[
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots"
]
] |
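The script in the row above builds 20-year centred rolling means of the JJA near-surface temperature anomaly and then hand-picks each model's +4 °C window. A minimal sketch of that selection pattern on synthetic data (the variable name `TT` is taken from the script; the warming curve itself is an assumption for illustration, not data from the repository's NetCDF files):

```python
# Sketch, not from the repository: 20-year centred rolling mean and a
# +4 degC +/- 10-year window selection with xarray on a synthetic series.
import numpy as np
import xarray as xr

years = np.arange(1950, 2101)
# Assumed synthetic warming curve: ~0 degC in 1950 rising to ~6 degC in 2100, plus noise.
tt = 6.0 * (years - 1950) / 150.0 + np.random.default_rng(0).normal(0, 0.3, years.size)
ds = xr.Dataset({"TT": ("year", tt)}, coords={"year": years})

# 20-year centred rolling mean, as in the script above.
ds_roll = ds.rolling(year=20, center=True).mean()

# Year whose rolling mean is closest to +4 degC, and the surrounding 20-year window.
idx = int(np.nanargmin(np.abs(ds_roll["TT"].values - 4.0)))
year_4deg = int(ds_roll["year"].values[idx])
window = ds_roll.sel(year=slice(year_4deg - 10, year_4deg + 10))
print(year_4deg, float(window["TT"].min()), float(window["TT"].max()))
```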
bundgus/radiothermostat-logging-control-python | [
"3898b90dfbaea93594ff8ac68ac81b57f8a43b70"
] | [
"dynamodb-query-plotly-tstat_log_embed.py"
] | [
"import boto3\nimport decimal\nimport json\nfrom boto3.dynamodb.conditions import Key\nimport pandas as pd\nimport plotly.offline as offline\nfrom plotly.graph_objs import *\nfrom plotly import tools\nimport datetime\nfrom pytz import timezone\nfrom datetime import timedelta\n\nhours_of_history = 24 * 2\n\n\n# Helper class to convert a DynamoDB item to JSON.\n# noinspection PyTypeChecker\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\nnow = datetime.datetime.now(timezone('UTC'))\nstarttimestamp = now - timedelta(hours=hours_of_history)\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('tstat_log')\n\nresponse = table.query(\n ProjectionExpression=\"log_timestamp, ts1_tstate, ts2_tstate, ts1_temp, ts2_temp, \"\n \"ts1_t_cool, ts2_t_cool, wu_temp_f, wu_relative_humidity\",\n KeyConditionExpression=Key('tstat_id').eq('DEADBEEF')\n & Key('log_timestamp').gt(str(starttimestamp))\n)\n\ndf = pd.DataFrame(response['Items'])\n\n# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.query\n# If LastEvaluatedKey is present in the response, you will need to paginate the result set.\n# For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide .\nhasmorepages = True\nwhile hasmorepages:\n if 'LastEvaluatedKey' in response:\n print('LastEvaluatedKey', response['LastEvaluatedKey'])\n print('getting next page of results')\n response = table.query(\n ProjectionExpression=\"log_timestamp, ts1_tstate, ts2_tstate, ts1_temp, ts2_temp, \"\n \"ts1_t_cool, ts2_t_cool, wu_temp_f, wu_relative_humidity\",\n KeyConditionExpression=Key('tstat_id').eq('DEADBEEF')\n & Key('log_timestamp').gt(str(starttimestamp)),\n ExclusiveStartKey=response['LastEvaluatedKey']\n )\n df = df.append(pd.DataFrame(response['Items']))\n else:\n print('got all response pages')\n hasmorepages = False\n\n\nstate_ts2 = df['ts2_tstate'].copy()\nstate_ts2[df['ts2_tstate'] == 'Cool'] = 1\nstate_ts2[df['ts2_tstate'] == 'Off'] = 0\nstate_ts2[df['ts2_tstate'] == 'Heat'] = -1\nstate_ts1 = df['ts1_tstate'].copy()\nstate_ts1[df['ts1_tstate'] == 'Cool'] = 1\nstate_ts1[df['ts1_tstate'] == 'Off'] = 0\nstate_ts1[df['ts1_tstate'] == 'Heat'] = -1\ndf['log_timestamp'] = pd.to_datetime(df['log_timestamp'])\n\ndf['log_timestamp'] = df['log_timestamp'].dt.tz_localize('UTC') \\\n .dt.tz_convert('US/Central').dt.tz_localize(None)\n\n# plotting configuration follows:\n\nfig = tools.make_subplots(rows=8, cols=1, print_grid=False, specs=[[{'rowspan': 6}],\n [None],\n [None],\n [None],\n [None],\n [None],\n [{}],\n [{}]\n ], shared_xaxes=True)\n\nts1_temp_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts1_temp'],\n name='ts1_temp',\n mode='markers',\n marker=dict(\n color='rgb(0, 255, 0)',\n size=5\n )\n)\n\nif 'ts1_t_heat' in df.columns:\n ts1_t_heat_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts1_t_heat'],\n name='ts1_t_heat',\n mode='lines',\n line=dict(\n color='rgb(255, 0, 0)',\n shape='hvh'\n )\n )\n fig.append_trace(ts1_t_heat_trace, 1, 1)\n\nif 'ts1_t_cool' in df.columns:\n ts1_t_cool_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts1_t_cool'],\n name='ts1_t_cool',\n mode='lines',\n line=dict(\n color='rgb(0, 0, 255)',\n shape='hvh'\n )\n )\n fig.append_trace(ts1_t_cool_trace, 1, 1)\n\nts2_temp_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts2_temp'],\n name='ts2_temp',\n mode='markers',\n marker=dict(\n 
color='rgb(255, 128, 0)',\n size=5\n )\n)\n\nif 'ts2_t_heat' in df.columns:\n ts2_t_heat_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts2_t_heat'],\n name='ts2_t_heat',\n mode='lines',\n line=dict(\n color='rgb(255, 0, 255)',\n shape='hvh'\n )\n )\n fig.append_trace(ts2_t_heat_trace, 1, 1)\n\nif 'ts2_t_cool' in df.columns:\n ts2_t_cool_trace = Scatter(\n x=df['log_timestamp'],\n y=df['ts2_t_cool'],\n name='ts2_t_cool',\n mode='lines',\n line=dict(\n color='rgb(0, 255, 255)',\n shape='hvh'\n )\n )\n fig.append_trace(ts2_t_cool_trace, 1, 1)\n\nwu_temp_f_trace = Scatter(\n x=df['log_timestamp'],\n y=df['wu_temp_f'],\n name='wu_temp_f',\n mode='lines+markers',\n line=dict(\n color='rgb(0, 0, 0)',\n dash='dash',\n shape='spline'\n )\n)\n\nwu_relative_humidity_trace = Scatter(\n x=df['log_timestamp'],\n y=df['wu_relative_humidity'],\n name='wu_relative_humidity',\n yaxis='y2',\n mode='lines+markers',\n line=dict(\n dash='dash',\n shape='spline'\n )\n)\n\nstate_ts1_trace = Scatter(\n x=df['log_timestamp'],\n y=state_ts1,\n name='state_ts1',\n # yaxis='y2',\n fill='tozeroy',\n line=dict(\n shape='hvh'\n )\n)\n\nstate_ts2_trace = Scatter(\n x=df['log_timestamp'],\n y=state_ts2,\n name='state_ts2',\n # yaxis='y2',\n fill='tozeroy',\n line=dict(\n shape='hvh'\n )\n)\n\nfig.append_trace(wu_temp_f_trace, 1, 1)\nfig.append_trace(ts1_temp_trace, 1, 1)\nfig.append_trace(ts2_temp_trace, 1, 1)\n\nfig.append_trace(state_ts2_trace, 7, 1)\nfig.append_trace(state_ts1_trace, 8, 1)\n\nfig['layout'].update(height=600, title='Temperatures and HVAC State ' + str(now))\n\ndiv = offline.plot(fig, show_link=False, output_type='div', include_plotlyjs=False)\n\nwith open('plotly_embed.html', 'w') as pe:\n pe.write(div)"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
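The logging script above pages through DynamoDB results by re-querying with `ExclusiveStartKey` whenever `LastEvaluatedKey` is present, appending each page to a DataFrame. A minimal sketch of that pagination loop, with `pandas.concat` instead of the now-removed `DataFrame.append` (the table name, key names and the `'DEADBEEF'` partition value are copied from the script; the shortened projection is an assumption):

```python
# Sketch of boto3 DynamoDB pagination; requires AWS credentials and an existing table.
import boto3
import pandas as pd
from boto3.dynamodb.conditions import Key

def query_all_pages(table, start_timestamp):
    """Query every page of results for one thermostat and return a single DataFrame."""
    kwargs = dict(
        ProjectionExpression="log_timestamp, ts1_temp, ts2_temp",
        KeyConditionExpression=Key('tstat_id').eq('DEADBEEF')
        & Key('log_timestamp').gt(str(start_timestamp)),
    )
    frames = []
    while True:
        response = table.query(**kwargs)
        frames.append(pd.DataFrame(response['Items']))
        last_key = response.get('LastEvaluatedKey')
        if last_key is None:
            break  # all pages retrieved
        kwargs['ExclusiveStartKey'] = last_key  # resume where the previous page stopped
    return pd.concat(frames, ignore_index=True)

# Usage (illustrative):
# table = boto3.resource('dynamodb').Table('tstat_log')
# df = query_all_pages(table, '2024-01-01 00:00:00+00:00')
```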
DiegoLigtenberg/Workspace-MasterThesis-MSS | [
"e8183031b6223051049f48e0da2bc2824e60239e"
] | [
"mss/postprocessing/generator.py"
] | [
"from audioop import minmax\nfrom math import prod\nfrom mss.preprocessing.preprocesssing import MinMaxNormalizer\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from auto_encoder_vanilla import VariationalAutoEncoder\n# from mss.models.auto_encoder import AutoEncoder\nfrom mss.models.auto_encoder_other import AutoEncoder\nfrom mss.models.atrain import load_fsdd\nimport librosa, librosa.display\nfrom scipy.io import wavfile\nfrom scipy.signal import wiener\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\n\ndef main():\n auto_encoder = AutoEncoder.load(\"Model_Other_Vocal_AE\") #model_spectr for first_source_sep\n auto_encoder.summary()\n b_train,y_train = load_fsdd(\"inference\") # note the amnt of datapoints load_fssd loads -> check the function\n (np.min(b_train),np.max(b_train))\n \n\n total_track = []\n reall = False\n estimate = None\n reference = None\n for r in range (1):\n # r=2\n total_track = []\n for i in range(0,30,1): # test 140-160 should be very good! [8, 56, 112, 216, 312, 560]\n sound = i #132 test\n\n # weights = np.full_like(b_train[:1],1/prod(b_train[:1].shape))\n # test[0][:512]*=3\n # test[0][1536:]/=3\n # print(test[0][:512])\n # # print(test)\n # print(5/0)\n if r <=1:\n x_train=np.array(y_train[sound:sound+1]) # y_train when vocal source sep\n else:\n x_train=np.array(b_train[sound:sound+1]) \n\n # x_train += (np.random.rand(b_train.shape[0],b_train.shape[1],b_train.shape[2],b_train.shape[3])-0.5) * 0.3\n print(i,x_train.shape)\n if r == 0:\n x_train = auto_encoder.model.predict(b_train[sound:sound+1])\n\n if r == 0:\n \n # sub(x_train)\n # x_train[(x_train<0.07) & (x_train >= 0.0)] -=.101\n # x_train[:128][(x_train[:128]<0.1) & (x_train[:128] >= 0.0)] *=.2\n\n # x_train[::][(x_train[::]<0.0) & (x_train[::] > -0.2)] /=.2 # the lower the division number (closer to 0) -> the more sound (drums) are removed, but also other sound\n # x_train[::][(x_train[::]<=-0.2)] = -.33\n\n # x_train[(x_train<=0.1)] = -.33\n # x_train[:128][(x_train[:128]<0.1) & (x_train[:128] >= 0.0)] *=.2 # the lower the multiplication number ( closer to 0) -> the more sounds are removed -> BAD actually EQ\n # x_train[975::][(x_train[975::]<0.1) & (x_train[975::] >= 0.0)] *=.1 # high rfequency's we hear harder even when decrease is softer\n # x_train[(x_train<0.0) & (x_train > -0.2)] /=.2 # the lower the division number (closer to 0) -> the more sound (drums) are removed, but also other sound\n # x_train[(x_train<=-0.2)] = -.33\n pass\n\n x_val, y_val = b_train[sound:sound+1],y_train[sound:sound+1]\n y_pred = x_train\n y_pred = tf.convert_to_tensor(y_pred,dtype=tf.float32)\n y_val = tf.cast(y_val, y_pred.dtype)\n val_loss = K.mean(tf.math.squared_difference(y_pred, y_val), axis=-1)\n val_loss = np.mean(val_loss.numpy())\n \n \n print(\"error\\t\\t\",val_loss)\n print(\"error\\t\\t\",np.mean(np.abs((x_train[:1]-y_train[sound:sound+1])**2)))\n print(\"min and max val:\",np.min(x_train),np.max(x_train))\n print(\"mean:\\t\\t\",np.mean(x_train))\n \n\n mute_sound = False\n if -0.15 < np.min(x_train) < 0.15 and -0.15 < np.max(x_train) < 0.15 and -0.15 < np.mean(x_train) < 0.15:\n print(\"mute sound\")\n mute_sound = True\n\n error = (x_train-y_train[sound:sound+1]) *5# *5 to exagerate\n # x_train +=error\n \n # plt.imshow(error[0],cmap=\"gray\",vmin=-1,vmax=1)\n # plt.show()\n\n print(x_train.shape)\n # print(min(x_train))\n # print(max(x_train))\n min_max_normalizer = MinMaxNormalizer(0,1) \n \n x_train = min_max_normalizer.denormalize(x_train)\n x_train = x_train 
[:,:,:,0]\n # print(x_train[0] == x_train[1])\n x_train = x_train[0]\n x_train = x_train[:,:127]\n x_train = x_train[:-1] \n # x_train[500:] =0 \n x_train = librosa.db_to_amplitude(x_train) \n print(x_train.shape)\n \n # amp_log_spectrogram = librosa.amplitude_to_db(x_train,ref=np.max)\n \n # fig, ax = plt.subplots() \n # img = librosa.display.specshow(amp_log_spectrogram, y_axis='linear', sr=44100, hop_length=1050, x_axis='time', ax=ax)\n # ax.set(title='Log-amplitude spectrogram')\n # ax.label_outer()\n # fig.colorbar(img, ax=ax, format=\"%+2.f dB\")\n # plt.show()\n\n \n if r == 0:\n estimate = x_train\n if r == 1:\n reference = x_train\n\n # reference2 = (reference - np.mean(reference)) / np.std(reference)\n # estimate = (estimate - np.mean(reference)) / np.std(reference)\n # reference = reference2\n \n \n # x_train = librosa.db_to_amplitude(x_train) \n # x_source = wiener(x_train, (5, 5))\n # print(x_source.shape)\n # scale = lambda x: x*1.5 \n # scale(x_train)\n\n # original phase ( gets lot of noise dont do it)\n # signal,sr = librosa.load(\"original.wav\",sr=44100)\n # stft = librosa.stft(signal,n_fft=4096,hop_length=1024)[:-2] \n # mag,phase = librosa.magphase(stft)\n # phase = phase[:,:127] \n # print(phase.shape)\n # print(x_train.shape)\n # new_stft = x_train +1j*phase\n # print(new_stft.shape)\n \n x_source = librosa.griffinlim(x_train,hop_length=1050)\n if mute_sound:\n x_source = np.zeros_like(x_source)+0.001\n # x_source*=1.5\n print((x_source.shape))\n total_track.append(x_source)\n # print(x_source)\n print(\"\\n\\n\\n\")\n # print(x_train.shape)\n # print(x_source.shape)\n total_track = np.array(total_track)\n total_track = total_track.flatten()\n \n print((total_track.shape))\n\n \n if r == 0:\n # total_track = wiener(total_track,mysize=3)\n wavfile.write(\"track_output/other_predict.wav\",44100,total_track) \n # estimate = total_track\n elif r == 1: \n # reference = total_track \n delta = 1e-7 # avoid numerical errors\n print(reference.shape)\n num = np.sum(np.square(reference), axis=None)\n # print(reference,estimate)\n den = np.sum(np.square(reference - estimate), axis=None)\n\n print(np.min(reference),np.max(reference),np.mean(reference))\n print(np.min(estimate),np.max(estimate),np.mean(estimate))\n print(reference.shape)\n print(reference[0])\n print(estimate[0])\n \n \n num += delta\n den += delta\n print(num)\n print(den,\"\\n\")\n print(\"sdr:\\t\", 10 * np.log10(num / den)) # SDR is always 0.5 away from mus_eval SDR thus it is correct to use on spectrogram too just mention how! \n # import museval\n # estimate = np.atleast_2d(estimate)\n # reference = np.atleast_2d(reference)\n # print(estimate.shape)\n # print(reference.shape)\n # print(museval.evaluate(estimates=estimate,references=reference))\n \n wavfile.write(\"track_output/other_target.wav\",44100,total_track) \n else:\n wavfile.write(\"track_output/other_mixture.wav\",44100,total_track) \n\n\n # print(x_train.shape)\n\n\nif __name__==\"__main__\":\n main()"
] | [
[
"numpy.max",
"tensorflow.math.squared_difference",
"tensorflow.convert_to_tensor",
"numpy.array",
"numpy.zeros_like",
"numpy.square",
"numpy.min",
"scipy.io.wavfile.write",
"numpy.mean",
"numpy.abs",
"numpy.log10",
"tensorflow.cast"
]
] |
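The source-separation script above scores its vocal estimate with an SDR of the form 10·log10(Σ reference² / Σ (reference − estimate)²), with a small delta guarding the division. A self-contained sketch of that metric on arbitrary arrays (the synthetic signals are assumptions; the formula mirrors the one in the script):

```python
# Sketch: signal-to-distortion ratio in dB for waveforms or magnitude spectrograms.
import numpy as np

def sdr(reference: np.ndarray, estimate: np.ndarray, delta: float = 1e-7) -> float:
    """Return 10*log10(sum(ref^2) / sum((ref-est)^2)); delta avoids division by zero."""
    num = np.sum(np.square(reference)) + delta
    den = np.sum(np.square(reference - estimate)) + delta
    return 10.0 * np.log10(num / den)

rng = np.random.default_rng(0)
reference = rng.normal(size=44100)                     # one second of a fake target signal
estimate = reference + 0.1 * rng.normal(size=44100)    # estimate with 10% residual error
print(f"SDR: {sdr(reference, estimate):.1f} dB")       # roughly 20 dB for this noise level
```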
mthh/gaspar | [
"92e92e195713906f5bb33dc3686a24701fb8cb23"
] | [
"server_app.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport asyncio\nimport binascii\nimport json\nimport logging\nimport math\nimport os\nimport numpy as np\nimport geopandas as gpd\nimport rasterio as rio\nimport rtree\nimport shlex\nimport spacy\nimport subprocess\nimport sys\nimport tempfile\nimport uuid\nimport uvloop\nimport gc\nfrom aiohttp import web\nfrom concurrent.futures import ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom pyproj import Proj, transform\nfrom rasterio.features import shapes as rio_shapes\nfrom rasterio import merge as rio_merge\nfrom shapely.geometry import Polygon, shape, mapping\nfrom shapely.ops import unary_union\nfrom glob import glob\n\n\ndef idx_generator_func(bounds):\n for i, bound in enumerate(bounds):\n yield (i, bound, i)\n\n\ndef make_index(bounds):\n return rtree.index.Index(\n [z for z in idx_generator_func(bounds)],\n Interleaved=True,\n )\n\nasync def handler_activity_features(request):\n \"\"\"\n Returns a GeoJSON FeatureCollection\n containing specific features for the requested activity\n (such a 'ski areas', 'ski lift' and 'pistes' for the activity \"ski\").\n \"\"\"\n app = request.app\n category = request.match_info['category']\n\n if category not in app['allowed_activity']:\n return web.Response(text='Error')\n\n app['logger'].info(\n 'Requested features for activity \"{}\" : found {} features'\n .format(category, len(app['layer_activity_{}'.format(category)]))\n )\n result = app['layer_activity_{}'.format(category)].to_json()\n return web.Response(text=result)\n\n\nasync def handler_features_post(request):\n \"\"\"\n Returns a GeoJSON FeatureCollection\n containing features for the requested `category`\n and intersecting with the requested posted feature geometry.\n \"\"\"\n\n app = request.app\n category = request.match_info['category']\n\n posted_data = await request.post()\n _geom = posted_data.get('geometry')\n geom = shape(json.loads(_geom))\n xmin, ymin, xmax, ymax = geom.bounds\n\n app['logger'].info(\n 'Requested {} within {}...'\n .format(category, (xmin, ymin, xmax, ymax)))\n\n async with app['lock']:\n if category not in app['allowed_category']:\n return web.Response(text='Error')\n\n ix_within = list(\n app['index_{}'.format(category)]\n .intersection((xmin, ymin, xmax, ymax)))\n temp = app['layer_{}'.format(category)].iloc[ix_within]\n\n result = temp[temp.geometry.intersects(geom)].to_json()\n\n app['logger'].info(\n '...found {} {} features'\n .format(category, len(ix_within)))\n\n return web.Response(text=result)\n\n\nasync def handler_features(request):\n \"\"\"\n Returns a GeoJSON FeatureCollection\n containing features for the requested `category`\n within the requested `bbox`.\n \"\"\"\n\n app = request.app\n category = request.match_info['category']\n bbox = request.match_info['bbox']\n app['logger'].info(\n 'Requested {} within {}...'\n .format(category, bbox))\n\n async with app['lock']:\n if category not in app['allowed_category']:\n return web.Response(text='Error')\n\n xmin, ymin, xmax, ymax = list(map(float, bbox.split(',')))\n\n ix_within = list(\n app['index_{}'.format(category)]\n .intersection((xmin, ymin, xmax, ymax)))\n result = app['layer_{}'.format(category)].iloc[ix_within].to_json()\n\n app['logger'].info(\n '...found {} {} features'\n .format(category, len(ix_within)))\n\n return web.Response(text=result)\n\n\nasync def index(request):\n \"\"\"Handler for the index page.\"\"\"\n return web.FileResponse('./dist/index.html')\n\n\ndef compute_binary_predicate(_op, 
_geoms1, _geoms2):\n geoms1 = [shape(i) for i in json.loads(_geoms1)]\n geoms2 = [shape(i) for i in json.loads(_geoms2)]\n result = {}\n for ix1, g1 in enumerate(geoms1):\n result[ix1] = {}\n for ix2, g2 in enumerate(geoms2):\n result[ix1][ix2] = getattr(g1, _op)(g2)\n return json.dumps(result)\n\ndef compute_op_geom(_op, _geoms, options):\n geoms = [shape(i) for i in json.loads(_geoms)]\n\n if _op == 'unary_union':\n res = unary_union(geoms)\n\n elif _op == 'intersection':\n res = geoms[0]\n for _geom in geoms[1:]:\n res = _geom.intersection(res)\n\n elif _op == 'symmetric_difference':\n res = geoms[0].symmetric_difference(geoms[1])\n\n elif _op == 'buffer':\n\n geo_serie = gpd.GeoSeries(\n geoms,\n crs='+proj=longlat +datum=WGS84 +no_defs ',\n ).to_crs(epsg=2154)\n\n if options['dist'] and int(options['dist']) != 0:\n res = unary_union(\n geo_serie.buffer(float(options['dist']))\n .boundary.buffer(float(options['uncertainty']))\n .to_crs('+proj=longlat +datum=WGS84 +no_defs ')\n .values\n )\n else:\n res = unary_union(\n geo_serie\n .buffer(float(options['uncertainty']))\n .to_crs('+proj=longlat +datum=WGS84 +no_defs ')\n .values\n )\n return json.dumps(mapping(res))\n\n\nasync def handler_geom_op(request):\n \"\"\"\n Handles some geo-operations (buffer, unary-union and intersection)\n to be performed on an array of GeoJSON geometries.\n \"\"\"\n _op = request.match_info['op']\n\n if _op in request.app['allowed_binary_predicate']:\n posted_data = await request.post()\n _geoms1 = posted_data.get('geoms1')\n _geoms2 = posted_data.get('geoms2')\n\n result = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n compute_binary_predicate,\n _op,\n _geoms1,\n _geoms2,\n )\n\n return web.Response(text=result)\n\n elif _op in request.app['allowed_geom_operation']:\n posted_data = await request.post()\n _geoms = posted_data.get('geoms')\n\n options = {\n 'dist': posted_data.get('distance'),\n 'uncertainty': posted_data.get('uncertainty'),\n } if _op == 'buffer' else None\n\n result = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n compute_op_geom,\n _op,\n _geoms,\n options,\n )\n return web.Response(text=result)\n\n else:\n return web.Response(\n text=json.dumps({\n 'message': (\n 'Error : binary predicate or geometric operation '\n f'\\'{_op}\\' not found.'\n ),\n })\n )\n\n\nasync def handler_clue(request):\n \"\"\"\n Handles clues in natural language to extract part of speech and named\n entities if any.\n \"\"\"\n posted_data = await request.post()\n clue_nl = posted_data.get('clue_nl')\n doc = request.app['nlp'](clue_nl)\n part_of_speech = [\n (x.orth_, x.pos_, x.lemma_)\n for x in [\n y for y in doc if not y.is_stop and y.pos_ != 'PUNCT']\n ]\n\n named_entities = [(X.text, X.label_) for X in doc.ents]\n return web.Response(\n text=json.dumps({\n \"part_of_speech\": part_of_speech,\n \"named_entities\": named_entities,\n })\n )\n\n\nasync def handle_404(request, response):\n return web.Response(text=\"ERROR 404 !\")\n\n\nasync def error_middleware(app, handler):\n async def middleware_handler(request):\n try:\n response = await handler(request)\n if response.status == 404:\n return await handle_404(request, response)\n return response\n except web.HTTPException as ex:\n if ex.status == 404:\n return await handle_404(request, ex)\n raise\n\n return middleware_handler\n\n\ndef get_extent_proj(path):\n with rio.open(path) as f:\n crs = f.read_crs()\n bounds = f.bounds\n return {\n 'path': path,\n 'crs_epsg': crs.to_epsg(),\n 'crs_string': 
Proj(crs.to_string()).srs,\n 'w': math.ceil(bounds[0]),\n 's': math.ceil(bounds[1]),\n 'e': math.floor(bounds[2]),\n 'n': math.floor(bounds[3]),\n 'ewres': f.res[0],\n 'nsres': f.res[1],\n }\n\n\ndef init_grass(info_dem):\n grass_bin = 'grass'\n startcmd = grass_bin + ' --config path'\n p = subprocess.Popen(\n startcmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n if p.returncode != 0:\n raise ValueError(\n 'Failed to load GRASS\\nStdout: {}\\nStderr: {}\\n'\n .format(out.decode(), err.decode()))\n\n gisbase = out.strip(b'\\n').decode()\n os.environ['GISBASE'] = gisbase\n sys.path.append(os.path.join(gisbase, 'etc', 'python'))\n gisdb = os.path.join(tempfile.gettempdir(), 'grassdata')\n\n try:\n os.stat(gisdb)\n except FileNotFoundError:\n os.mkdir(gisdb)\n\n location = binascii.hexlify(os.urandom(12)).decode()\n location_path = os.path.join(gisdb, location)\n mapset = 'PERMANENT'\n\n startcmd = ' '.join([\n grass_bin,\n '-c epsg:{}'.format(info_dem['crs_epsg']),\n '-e',\n location_path,\n ])\n\n print('Starting grass with command: `' + startcmd + '`')\n p = subprocess.Popen(\n startcmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n out, err = p.communicate()\n if p.returncode != 0:\n raise ValueError(\n 'Failed to load GRASS\\nStdout: {}\\nStderr: {}\\n'\n .format(out.decode(), err.decode()))\n\n print('Created location ', location_path)\n\n import grass.script as grass\n import grass.script.setup as gsetup\n\n gsetup.init(gisbase, gisdb, location, mapset)\n grass.message('--- GRASS GIS 7: Current GRASS GIS 7 environment:')\n print(grass.gisenv())\n\n grass.message('--- GRASS GIS 7: Setting projection info:')\n _out_proj = grass.read_command(\n 'g.proj',\n flags='c',\n epsg=info_dem['crs_epsg'],\n )\n print(_out_proj)\n\n grass.message('--- GRASS GIS 7: Loading DEM file:')\n res = grass.read_command(\n 'r.external',\n flags='o',\n input=info_dem['path'],\n band=1,\n output=\"rast_5cb08c8150bbc7\",\n )\n print(res)\n\n grass.message('--- GRASS GIS 7: Defining the region...')\n grass.read_command(\n 'g.region',\n n=info_dem['n'],\n s=info_dem['s'],\n e=info_dem['e'],\n w=info_dem['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n return {\n \"gisbase\": gisbase,\n \"gisdb\": gisdb,\n \"location\": location,\n \"mapset\": mapset,\n }\n\n\ndef _validate_number(h):\n # Will raise a ValueError if 'h' isn't / can't be converted\n # to 'float' :\n float(h)\n return h\n\n\ndef _validate_datetime(year, month, day, hour, minute):\n # In order to raise a ValueError if one of them\n # isn't (or cannot be converted to) an 'int' :\n int(year) + int(month) + int(day) + int(hour) + int(minute)\n return (year, month, day, hour, minute)\n\n\ndef _validate_region(region_coords, info_dem):\n in_proj = Proj(info_dem['crs_string'])\n out_proj = Proj(init='epsg:4326')\n _to_projected = partial(transform, out_proj, in_proj)\n if region_coords is None:\n return None\n _coords = list(map(lambda x: float(x), region_coords.split(',')))\n _coords[0], _coords[2] = _to_projected(_coords[0], _coords[2])\n _coords[1], _coords[3] = _to_projected(_coords[1], _coords[3])\n if _coords[0] <= info_dem['w'] or _coords[0] >= info_dem['e'] \\\n or _coords[2] >= info_dem['n'] or _coords[2] <= info_dem['s']:\n raise ValueError(\n 'Requested region {} is outside the allowed region '\n '(xmin={}, xmax={}, ymin={}, ymax={})'\n .format(\n _coords,\n info_dem['w'],\n info_dem['e'],\n info_dem['s'],\n info_dem['n'],\n ))\n return {\n 'w': 
str(_coords[0]),\n 'e': str(_coords[1]),\n 's': str(_coords[2]),\n 'n': str(_coords[3]),\n }\n\n\ndef _validate_one_position(_coords, info_dem):\n in_proj = Proj(info_dem['crs_string'])\n out_proj = Proj(init='epsg:4326')\n _to_projected = partial(transform, out_proj, in_proj)\n _coords = _to_projected(_coords[1], _coords[0])\n if _coords[1] >= info_dem['n'] or _coords[1] <= info_dem['s'] \\\n or _coords[0] >= info_dem['e'] or _coords[0] <= info_dem['w']:\n raise ValueError(\n 'Requested point {} is outside the allowed region '\n '(xmin={}, xmax={}, ymin={}, ymax={})'\n .format(\n _coords,\n info_dem['w'],\n info_dem['e'],\n info_dem['s'],\n info_dem['n'],\n ))\n return '{},{}'.format(*_coords)\n\n\ndef _validate_coordinates(coords, info_dem):\n if coords.startswith('(') and coords.endswith(')'):\n _coords_list = [\n list(map(lambda x: float(x), c.split(',')))\n for c in coords[1:-1].split('),(')\n ]\n return [\n _validate_one_position(_coords, info_dem)\n for _coords in _coords_list\n ]\n\n else:\n _coords = list(map(lambda x: float(x), coords.split(',')))\n return _validate_one_position(_coords, info_dem)\n\n\nasync def interviz_wrapper(request):\n try:\n c = _validate_coordinates(\n request.rel_url.query['coordinates'],\n request.app['info_dem'],\n )\n h1 = _validate_number(request.rel_url.query['height1'])\n h2 = _validate_number(request.rel_url.query['height2'])\n region = _validate_region(\n request.rel_url.query.get('region', None),\n request.app['info_dem'],\n )\n\n except Exception as e:\n return web.Response(\n text=json.dumps({\"message\": \"Error : {}\".format(e)}))\n\n if isinstance(c, list):\n res = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n interviz_multiple,\n request.app['path_info'],\n request.app['info_dem'],\n c,\n h1,\n h2,\n region,\n )\n else:\n res = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n interviz,\n request.app['path_info'],\n request.app['info_dem'],\n c,\n h1,\n h2,\n region,\n )\n\n return web.Response(text=res)\n\n\ndef interviz_multiple(path_info, info_dem, coords_list, height1, height2, region):\n import grass.script as GRASS\n try:\n if region:\n GRASS.read_command(\n 'g.region',\n n=region['n'],\n s=region['s'],\n e=region['e'],\n w=region['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n results_layers = []\n for i, coordinates in enumerate(coords_list):\n uid = str(uuid.uuid4()).replace('-', '')\n grass_name = \"output_{}\".format(uid)\n output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))\n results_layers.append(output_name)\n\n GRASS.message(\n '--- GRASS GIS 7: Computing viewshed {}/{}'\n .format(i + 1, len(coords_list))\n )\n res = GRASS.read_command(\n 'r.viewshed',\n input='rast_5cb08c8150bbc7',\n coordinates=coordinates,\n observer_elevation=height1,\n target_elevation=height2,\n # max_distance=max_distance,\n refraction_coeff=\"0.14286\",\n memory=\"1000\",\n flags='b',\n output=grass_name,\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Saving resulting raster layer')\n res = GRASS.read_command(\n 'r.out.gdal',\n input=grass_name,\n output=output_name,\n format=\"GTiff\",\n createopt=\"TFW=YES,COMPRESS=LZW\",\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Remove temporary result raster from GRASS')\n res = GRASS.read_command(\n 'g.remove',\n flags='f',\n type='raster',\n name=grass_name,\n )\n print(res)\n\n if region:\n GRASS.read_command(\n 'g.region',\n n=info_dem['n'],\n s=info_dem['s'],\n e=info_dem['e'],\n 
w=info_dem['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n except Exception as e:\n return json.dumps({\"message\": \"Error : {}\".format(e)})\n\n datasets = [rio.open(path_layer, 'r') for path_layer in results_layers]\n res, out_trans = rio_merge.merge(datasets, indexes=1)\n epsg_value = datasets[0].crs.to_epsg()\n results = [{\n 'properties': {'visibility': v},\n 'geometry': s,\n 'type': 'Feature',\n } for i, (s, v) in enumerate(rio_shapes(\n res, mask=None, transform=datasets[0].transform)) if v == 1.0]\n\n with open('/tmp/{}.geojson'.format(uid), 'w') as f:\n f.write(json.dumps({\"type\": \"FeatureCollection\", \"features\": results}))\n\n for ds, path_layer in zip(datasets, results_layers):\n ds.close()\n os.remove(path_layer)\n\n p = subprocess.Popen(\n shlex.split(\n 'ogr2ogr -s_srs \"EPSG:{}\" -t_srs \"EPSG:4326\" '\n '-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n os.remove('/tmp/{}.geojson'.format(uid))\n if p.returncode != 0:\n print('Error: ', err)\n return json.dumps({\"message\": \"Error : {}\".format(err)})\n\n return out.decode()\n\n\ndef interviz(path_info, info_dem, coordinates, height1, height2, region):\n import grass.script as GRASS\n try:\n uid = str(uuid.uuid4()).replace('-', '')\n grass_name = \"output_{}\".format(uid)\n output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))\n\n if region:\n GRASS.read_command(\n 'g.region',\n n=region['n'],\n s=region['s'],\n e=region['e'],\n w=region['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n GRASS.message(\n '--- GRASS GIS 7: Computing viewshed')\n res = GRASS.read_command(\n 'r.viewshed',\n input='rast_5cb08c8150bbc7',\n coordinates=coordinates,\n observer_elevation=height1,\n target_elevation=height2,\n # max_distance=max_distance,\n refraction_coeff=\"0.14286\",\n memory=\"1000\",\n flags='b',\n output=grass_name,\n )\n print(res)\n\n if region:\n GRASS.read_command(\n 'g.region',\n n=info_dem['n'],\n s=info_dem['s'],\n e=info_dem['e'],\n w=info_dem['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n GRASS.message(\n '--- GRASS GIS 7: Saving resulting raster layer')\n res = GRASS.read_command(\n 'r.out.gdal',\n input=grass_name,\n output=output_name,\n format=\"GTiff\",\n createopt=\"TFW=YES,COMPRESS=LZW\",\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Remove temporary result raster from GRASS')\n res = GRASS.read_command(\n 'g.remove',\n flags='f',\n type='raster',\n name=grass_name,\n )\n print(res)\n\n except Exception as e:\n return json.dumps({\"message\": \"Error : {}\".format(e)})\n\n with rio.open(output_name) as src:\n epsg_value = src.crs.to_epsg()\n image = src.read(1)\n results = [{\n 'properties': {'visibility': v},\n 'geometry': s,\n 'type': 'Feature',\n } for i, (s, v) in enumerate(rio_shapes(\n image, mask=None, transform=src.transform)) if v == 1.0]\n\n with open('/tmp/{}.geojson'.format(uid), 'w') as f:\n f.write(json.dumps({\"type\": \"FeatureCollection\", \"features\": results}))\n\n p = subprocess.Popen(\n shlex.split(\n 'ogr2ogr -s_srs \"EPSG:{}\" -t_srs \"EPSG:4326\" '\n '-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n os.remove('/tmp/{}.geojson'.format(uid))\n os.remove(output_name)\n if p.returncode != 0:\n print('Error: ', err)\n return json.dumps({\"message\": \"Error : {}\".format(err)})\n\n return 
out.decode()\n\n\nasync def sunmask_wrapper(request):\n try:\n datetime = _validate_datetime(\n request.rel_url.query['year'],\n request.rel_url.query['month'],\n request.rel_url.query['day'],\n request.rel_url.query['hour'],\n request.rel_url.query['minute'],\n )\n region = _validate_region(\n request.rel_url.query.get('region', None),\n request.app['info_dem'],\n )\n timezone = _validate_number(request.rel_url.query.get('timezone', '1'))\n if not 0 <= int(timezone) <= 25:\n raise ValueError('Invalid timezone')\n sun = request.rel_url.query.get('sun', False)\n if isinstance(sun, str):\n if sun.lower() == 'false':\n sun = False\n else:\n sun = True\n\n except Exception as e:\n return web.Response(\n text=json.dumps({\"message\": \"Error : {}\".format(e)}))\n\n res = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n sunmask,\n request.app['path_info'],\n request.app['info_dem'],\n datetime,\n region,\n timezone,\n sun,\n )\n\n return web.Response(text=res)\n\n\ndef sunmask(path_info, info_dem, d, region, tz, sun):\n import grass.script as GRASS\n try:\n uid = str(uuid.uuid4()).replace('-', '')\n grass_name = \"output_{}\".format(uid)\n output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))\n\n if region:\n GRASS.message(\n '--- GRASS GIS 7: Reducing the region')\n GRASS.read_command(\n 'g.region',\n n=region['n'],\n s=region['s'],\n e=region['e'],\n w=region['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n GRASS.message(\n '--- GRASS GIS 7: Computing sunmask')\n res = GRASS.read_command(\n 'r.sunmask',\n elevation='rast_5cb08c8150bbc7',\n year=d[0],\n month=d[1],\n day=d[2],\n hour=d[3],\n minute=d[4],\n timezone=tz,\n output=grass_name,\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Saving resulting raster layer')\n res = GRASS.read_command(\n 'r.out.gdal',\n input=grass_name,\n output=output_name,\n format=\"GTiff\",\n createopt=\"TFW=YES,COMPRESS=LZW\",\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Remove temporary result raster from GRASS')\n res = GRASS.read_command(\n 'g.remove',\n flags='f',\n type='raster',\n name=grass_name,\n )\n print(res)\n\n if region:\n GRASS.message(\n '--- GRASS GIS 7: Restoring the region')\n GRASS.read_command(\n 'g.region',\n n=info_dem['n'],\n s=info_dem['s'],\n e=info_dem['e'],\n w=info_dem['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n except Exception as e:\n return json.dumps({\"message\": \"Error : {}\".format(e)})\n\n with rio.open(output_name) as src:\n epsg_value = src.crs.to_epsg()\n image = src.read(1)\n results = [{\n 'properties': {'shadow': v},\n 'geometry': s,\n 'type': 'Feature',\n } for i, (s, v) in enumerate(rio_shapes(\n image, mask=None, transform=src.transform)) if v == 1.0]\n\n # In this case we want the difference between the region and the\n # computed areas of cast shadow\n if sun:\n region = Polygon([\n (float(region['w']), float(region['s'])),\n (float(region['e']), float(region['s'])),\n (float(region['e']), float(region['n'])),\n (float(region['w']), float(region['n'])),\n (float(region['w']), float(region['s']))\n ])\n shadow_union = unary_union([shape(ft['geometry']) for ft in results])\n results = [{\n 'type': 'Feature',\n 'geometry': mapping(region.difference(shadow_union)),\n 'properties': {'sun': 1.0}\n }]\n\n with open('/tmp/{}.geojson'.format(uid), 'w') as f:\n f.write(json.dumps({\"type\": \"FeatureCollection\", \"features\": results}))\n\n p = subprocess.Popen(\n shlex.split(\n 'ogr2ogr -s_srs \"EPSG:{}\" -t_srs 
\"EPSG:4326\" '\n '-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n os.remove('/tmp/{}.geojson'.format(uid))\n os.remove(output_name)\n if p.returncode != 0:\n print('Error: ', err)\n return json.dumps({\"message\": \"Error : {}\".format(err)})\n\n return out.decode()\n\n\nasync def sun_wrapper(request):\n try:\n dt = _validate_datetime(\n request.rel_url.query['year'],\n request.rel_url.query['month'],\n request.rel_url.query['day'],\n request.rel_url.query['hour'],\n request.rel_url.query['minute'],\n )\n day = datetime(\n int(dt[0]),\n int(dt[1]),\n int(dt[2]),\n ).timetuple().tm_yday\n time = float(dt[3]) + (float(dt[4]) / 60)\n region = _validate_region(\n request.rel_url.query.get('region', None),\n request.app['info_dem'],\n )\n timezone = _validate_number(request.rel_url.query.get('timezone', '1'))\n if not 0 <= int(timezone) <= 25:\n raise ValueError('Invalid timezone')\n is_sun = request.rel_url.query.get('sun', False)\n if isinstance(sun, str):\n if is_sun.lower() == 'false':\n is_sun = False\n else:\n is_sun = True\n\n except Exception as e:\n return web.Response(\n text=json.dumps({\"message\": \"Error : {}\".format(e)}))\n\n res = await request.app.loop.run_in_executor(\n request.app[\"ProcessPool\"],\n sun,\n request.app['path_info'],\n request.app['info_dem'],\n day,\n time,\n region,\n timezone,\n is_sun,\n )\n\n return web.Response(text=res)\n\n\ndef sun(path_info, info_dem, day, time, region, tz, is_sun):\n import grass.script as GRASS\n try:\n uid = str(uuid.uuid4()).replace('-', '')\n grass_name = \"output_{}\".format(uid)\n output_name = os.path.join(path_info['gisdb'], '.'.join([uid, 'tif']))\n\n if region:\n GRASS.message(\n '--- GRASS GIS 7: Reducing the region')\n GRASS.read_command(\n 'g.region',\n n=region['n'],\n s=region['s'],\n e=region['e'],\n w=region['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n GRASS.message(\n '--- GRASS GIS 7: Computing longitude map')\n\n GRASS.read_command(\n 'r.latlong',\n flags='l',\n input='rast_5cb08c8150bbc7',\n output='rast_long_5cb08c8150bbc7',\n )\n\n GRASS.message(\n '--- GRASS GIS 7: Computing sun incidence')\n res = GRASS.read_command(\n 'r.sun',\n elevation='rast_5cb08c8150bbc7',\n long='rast_long_5cb08c8150bbc7',\n day=day,\n time=time,\n civil_time=tz,\n incidout=grass_name,\n nprocs=2,\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Saving resulting raster layer')\n res = GRASS.read_command(\n 'r.out.gdal',\n input=grass_name,\n output=output_name,\n format=\"GTiff\",\n createopt=\"TFW=YES,COMPRESS=LZW\",\n )\n print(res)\n\n GRASS.message(\n '--- GRASS GIS 7: Remove temporary result raster from GRASS')\n res = GRASS.read_command(\n 'g.remove',\n flags='f',\n type='raster',\n name=grass_name,\n )\n print(res)\n res = GRASS.read_command(\n 'g.remove',\n flags='f',\n type='raster',\n name='rast_long_5cb08c8150bbc7',\n )\n print(res)\n if region:\n GRASS.message(\n '--- GRASS GIS 7: Restoring the region')\n GRASS.read_command(\n 'g.region',\n n=info_dem['n'],\n s=info_dem['s'],\n e=info_dem['e'],\n w=info_dem['w'],\n nsres=info_dem['nsres'],\n ewres=info_dem['ewres'],\n )\n\n except Exception as e:\n return json.dumps({\"message\": \"Error : {}\".format(e)})\n\n with rio.open(output_name) as src:\n epsg_value = src.crs.to_epsg()\n image = src.read(1)\n image = np.nan_to_num(image)\n image[image >= 1.0] = 1.0\n if is_sun:\n results = [{\n 'properties': {'sun': v},\n 'geometry': s,\n 'type': 
'Feature',\n } for i, (s, v) in enumerate(rio_shapes(\n image, mask=None, transform=src.transform)) if v == 1.0]\n else:\n results = [{\n 'properties': {'sun': v},\n 'geometry': s,\n 'type': 'Feature',\n } for i, (s, v) in enumerate(rio_shapes(\n image, mask=None, transform=src.transform)) if v != 1.0]\n\n with open('/tmp/{}.geojson'.format(uid), 'w') as f:\n f.write(json.dumps({\"type\": \"FeatureCollection\", \"features\": results}))\n\n p = subprocess.Popen(\n shlex.split(\n 'ogr2ogr -s_srs \"EPSG:{}\" -t_srs \"EPSG:4326\" '\n '-f GeoJSON /dev/stdout /tmp/{}.geojson'.format(epsg_value, uid)),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n os.remove('/tmp/{}.geojson'.format(uid))\n if p.returncode != 0:\n print('Error: ', err)\n return json.dumps({\"message\": \"Error : {}\".format(err)})\n\n return out.decode()\n\n\nasync def make_app(loop, info_dem, addr='0.0.0.0', port='8008'):\n logging.basicConfig(level=logging.INFO)\n app = web.Application(\n loop=loop,\n client_max_size=17408**2,\n middlewares=[error_middleware],\n )\n app['logger'] = logging.getLogger(\"features.main\")\n app['path_info'] = init_grass(info_dem)\n app['info_dem'] = info_dem\n app.add_routes([\n web.get('/sun', sun_wrapper),\n web.get('/sunmask', sunmask_wrapper),\n web.get('/viewshed', interviz_wrapper),\n web.get('/activity-features/{category}', handler_activity_features),\n web.get('/features/{category}/{bbox}', handler_features),\n web.post('/features/{category}', handler_features_post),\n web.post('/parse-clue', handler_clue),\n web.post('/{op}', handler_geom_op),\n web.get('/', index),\n web.static('/', 'dist/'),\n ])\n handler = app.make_handler()\n srv = await loop.create_server(handler, addr, port)\n return srv, app, handler\n\n\ndef main(prefix_data='data/osm/'):\n filename = glob('data/elevation/*.tif')\n info_dem = get_extent_proj(filename[0])\n\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n loop = asyncio.get_event_loop()\n asyncio.set_event_loop(loop)\n srv, app, handler = loop.run_until_complete(make_app(loop, info_dem))\n\n app['allowed_binary_predicate'] = {\n 'intersects',\n 'equals',\n 'contains',\n 'crosses',\n 'overlaps',\n 'touches',\n 'within',\n }\n\n app['allowed_geom_operation'] = {\n 'buffer',\n 'intersection',\n 'difference',\n 'symmetric_difference',\n 'unary_union',\n }\n\n app['allowed_category'] = {\n 'RIVER',\n 'LAKE',\n 'RESERVOIR',\n 'ROAD',\n 'PATHWAY',\n 'POWERLINE',\n 'PISTE',\n 'PEAK',\n 'COL',\n 'SKILIFT',\n 'CITY',\n 'TOWN',\n 'VILLAGE',\n }\n\n app['allowed_activity'] = {\n 'ski',\n 'randonnee',\n 'speleologie',\n 'escalade',\n 'vtt',\n }\n app['lock'] = asyncio.Lock()\n app['logger'].info('Opening OSM layers in memory...')\n app['layer_RIVER'] = gpd.read_file(\n os.path.join(prefix_data, 'eaux_courantes_choucas.geojson'))\n app['layer_LAKE'] = gpd.read_file(\n os.path.join(prefix_data, 'water_lake_choucas.geojson'))\n app['layer_RESERVOIR'] = gpd.read_file(\n os.path.join(prefix_data, 'water_reservoir_choucas.geojson'))\n app['layer_ROAD'] = gpd.read_file(\n os.path.join(prefix_data, 'routes_choucas.geojson'))\n app['layer_PATHWAY'] = gpd.read_file(\n os.path.join(prefix_data, 'sentiers_choucas.geojson'))\n app['layer_POWERLINE'] = gpd.read_file(\n os.path.join(prefix_data, 'powerline_choucas.geojson'))\n app['layer_PISTE'] = gpd.read_file(\n os.path.join(prefix_data, 'pistes_choucas.geojson'))\n app['layer_PEAK'] = gpd.read_file(\n os.path.join(prefix_data, 'peak_choucas.geojson'))\n app['layer_COL'] = gpd.read_file(\n 
os.path.join(prefix_data, 'col_choucas.geojson'))\n app['layer_SKILIFT'] = gpd.read_file(\n os.path.join(prefix_data, 'cable_skilift_choucas.geojson'))\n app['layer_CITY'] = gpd.read_file(\n os.path.join(prefix_data, 'city_choucas.geojson'))\n app['layer_TOWN'] = gpd.read_file(\n os.path.join(prefix_data, 'town_choucas.geojson'))\n app['layer_VILLAGE'] = gpd.read_file(\n os.path.join(prefix_data, 'village_choucas.geojson'))\n\n # Specific layers related to the activity of the victim\n app['layer_activity_ski'] = gpd.read_file(\n os.path.join(\n prefix_data,\n 'domaine_station_remontee_ski_choucas_large.geojson'))\n app['layer_activity_speleologie'] = gpd.read_file(\n os.path.join(\n prefix_data,\n 'cave_entrance_speleologie_choucas_large.geojson'))\n app['layer_activity_escalade'] = gpd.read_file(\n os.path.join(\n prefix_data,\n 'sport_climbing_escalade_choucas_large.geojson'))\n app['layer_activity_vtt'] = gpd.read_file(\n os.path.join(\n prefix_data,\n 'mtb_scale_vtt_choucas_large.geojson'))\n\n app['logger'].info('Creating spatial index for OSM layers...')\n\n for lyr_name in app['allowed_category']:\n app['index_{}'.format(lyr_name)] = make_index(\n [g.bounds for g in app['layer_{}'.format(lyr_name)].geometry])\n\n app['logger'].info('Loading spaCy model for French...')\n app['nlp'] = spacy.load('fr_core_news_sm')\n app['ProcessPool'] = ProcessPoolExecutor(1)\n app['logger'].info('Serving on' + str(srv.sockets[0].getsockname()))\n\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n srv.close()\n loop.run_until_complete(srv.wait_closed())\n loop.run_until_complete(app.shutdown())\n loop.run_until_complete(handler.shutdown(60.0))\n loop.run_until_complete(app.cleanup())\n loop.close()\n\n\nif __name__ == '__main__':\n gc.disable()\n main()\n"
] | [
[
"numpy.nan_to_num"
]
] |
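The aiohttp server above answers `/features/{category}/{bbox}` requests by first querying an rtree index built from each layer's geometry bounds and then refining the candidates with exact shapely intersection tests. A minimal sketch of that two-stage pattern on synthetic geometries (the buffered points and the query box are assumptions; the index construction mirrors `make_index` and `handler_features`):

```python
# Sketch: rtree bounding-box pre-filter followed by an exact shapely intersection test.
import rtree
import geopandas as gpd
from shapely.geometry import Point, box

# Assumed toy layer: ten small discs along the diagonal.
gdf = gpd.GeoDataFrame(geometry=[Point(x, x).buffer(0.1) for x in range(10)], crs="EPSG:4326")

# Bulk-load the index from (id, bounds, object) tuples, as the server does per layer.
index = rtree.index.Index(
    (i, geom.bounds, i) for i, geom in enumerate(gdf.geometry)
)

query_geom = box(2.5, 2.5, 5.5, 5.5)                      # the requested bbox
candidates = list(index.intersection(query_geom.bounds))  # cheap bounds-only pre-filter
subset = gdf.iloc[candidates]
hits = subset[subset.geometry.intersects(query_geom)]     # exact geometric test
print(len(candidates), "candidates,", len(hits), "true intersections")
```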
inzi/MobileNet-SSD-RealSense | [
"b9fcc4ade65a8c28496a4a87915157c3e3c17177"
] | [
"main.py"
] | [
"import sys, os\nif sys.version_info.major < 3 or sys.version_info.minor < 4:\n print(\"Please using python3.4 or greater!\")\n sys.exit(1)\nimport numpy as np\nimport cv2, io, time, argparse, re\nfrom os import system\nfrom os.path import isfile, join\nfrom time import sleep\nimport multiprocessing as mp\nfrom openvino.inference_engine import IENetwork, IEPlugin\nimport heapq\nimport threading\nfrom imutils.video import VideoStream\n\n#Django connect\napi_path = '/home/pi/workspace/bbw3.2/bb' #os.path.abspath('~','workspace','bbw3.2','bb', 'bb')\n# print(api_path) # home/foo/work\n\nsys.path.append(api_path) \n\n# print(sys.path)\n\nimport django\nfrom django.conf import settings\nfrom bb import settings as bbsettings\n#os.environ['DJANGO_SETTINGS_MODULE']='bb.settings'\n\n\nsettings.configure(DATABASES=bbsettings.DATABASES, DEBUG=True)\n\ndjango.setup()\n\n#from captures.models import Capture\nprint ('starting...')\n\n\n\npipeline = None\nlastresults = None\nthreads = []\nprocesses = []\nframeBuffer = None\nresults = None\nfps = \"\"\ndetectfps = \"\"\nframecount = 0\ndetectframecount = 0\ntime1 = 0\ntime2 = 0\ncam = None\ncamera_mode = 0\ncamera_width = 320\ncamera_height = 240\nwindow_name = \"\"\nbackground_transparent_mode = 0\nssd_detection_mode = 1\nface_detection_mode = 0\nelapsedtime = 0.0\nbackground_img = None\ndepth_sensor = None\ndepth_scale = 1.0\nalign_to = None\nalign = None\n\nLABELS = [['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'],\n ['background', 'face']]\n\ndef camThread(LABELS, results, frameBuffer, camera_mode, camera_width, camera_height, background_transparent_mode, background_img, vidfps):\n global fps\n global detectfps\n global lastresults\n global framecount\n global detectframecount\n global time1\n global time2\n global cam\n global window_name\n global depth_scale\n global align_to\n global align\n\n # Configure depth and color streams\n # Or\n # Open USB Camera streams\n # # if camera_mode == 0:\n # # pipeline = rs.pipeline()\n # # config = rs.config()\n # # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, vidfps)\n # # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, vidfps)\n # # profile = pipeline.start(config)\n # # depth_sensor = profile.get_device().first_depth_sensor()\n # # depth_scale = depth_sensor.get_depth_scale()\n # # align_to = rs.stream.color\n # # align = rs.align(align_to)\n # # window_name = \"RealSense\"\n # # elif camera_mode == 1:\n # # cam = cv2.VideoCapture(0)\n # # if cam.isOpened() != True:\n # # print(\"USB Camera Open Error!!!\")\n # # sys.exit(0)\n # # cam.set(cv2.CAP_PROP_FPS, vidfps)\n # # cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)\n # # cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)\n # # window_name = \"USB Camera\"\n\n cam = VideoStream(usePiCamera=True, \n resolution=(640, 480),\n framerate = 32).start()\n window_name = \"picam\"\n \n cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)\n\n while True:\n t1 = time.perf_counter()\n\n # 0:= RealSense Mode\n # 1:= USB Camera Mode\n\n\n # USB Camera Stream Read\n color_image = cam.read()\n if frameBuffer.full():\n frameBuffer.get()\n frames = color_image\n\n height = color_image.shape[0]\n width = color_image.shape[1]\n frameBuffer.put(color_image.copy())\n res = None\n\n if not results.empty():\n res = results.get(False)\n detectframecount += 1\n imdraw = 
overlay_on_image(frames, res, LABELS, camera_mode, background_transparent_mode,\n background_img, depth_scale=depth_scale, align=align)\n lastresults = res\n else:\n continue\n # imdraw = overlay_on_image(frames, lastresults, LABELS, camera_mode, background_transparent_mode,\n # background_img, depth_scale=depth_scale, align=align)\n\n cv2.imshow(window_name, cv2.resize(imdraw, (width, height)))\n\n if cv2.waitKey(1)&0xFF == ord('q'):\n # Stop streaming\n if pipeline != None:\n pipeline.stop()\n sys.exit(0)\n\n ## Print FPS\n framecount += 1\n if framecount >= 15:\n fps = \"(Playback) {:.1f} FPS\".format(time1/15)\n detectfps = \"(Detection) {:.1f} FPS\".format(detectframecount/time2)\n framecount = 0\n detectframecount = 0\n time1 = 0\n time2 = 0\n t2 = time.perf_counter()\n elapsedTime = t2-t1\n time1 += 1/elapsedTime\n time2 += elapsedTime\n\n\n# l = Search list\n# x = Search target value\ndef searchlist(l, x, notfoundvalue=-1):\n if x in l:\n return l.index(x)\n else:\n return notfoundvalue\n\n\ndef async_infer(ncsworker):\n\n #ncsworker.skip_frame_measurement()\n\n while True:\n ncsworker.predict_async()\n\n\nclass NcsWorker(object):\n\n def __init__(self, devid, frameBuffer, results, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm):\n self.devid = devid\n self.frameBuffer = frameBuffer\n self.model_xml = \"./lrmodel/MobileNetSSD/MobileNetSSD_deploy.xml\"\n self.model_bin = \"./lrmodel/MobileNetSSD/MobileNetSSD_deploy.bin\"\n self.camera_width = camera_width\n self.camera_height = camera_height\n self.num_requests = 4\n self.inferred_request = [0] * self.num_requests\n self.heap_request = []\n self.inferred_cnt = 0\n self.plugin = IEPlugin(device=\"MYRIAD\")\n self.net = IENetwork(model=self.model_xml, weights=self.model_bin)\n self.input_blob = next(iter(self.net.inputs))\n self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)\n self.results = results\n self.camera_mode = camera_mode\n self.number_of_ncs = number_of_ncs\n if self.camera_mode == 0:\n self.skip_frame = skpfrm\n else:\n self.skip_frame = 0\n self.roop_frame = 0\n self.vidfps = vidfps\n\n def image_preprocessing(self, color_image):\n\n prepimg = cv2.resize(color_image, (300, 300))\n prepimg = prepimg - 127.5\n prepimg = prepimg * 0.007843\n prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add\n prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW\n return prepimg\n\n\n def predict_async(self):\n try:\n\n if self.frameBuffer.empty():\n return\n\n self.roop_frame += 1\n if self.roop_frame <= self.skip_frame:\n self.frameBuffer.get()\n return\n self.roop_frame = 0\n\n prepimg = self.image_preprocessing(self.frameBuffer.get())\n reqnum = searchlist(self.inferred_request, 0)\n\n if reqnum > -1:\n self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})\n self.inferred_request[reqnum] = 1\n self.inferred_cnt += 1\n if self.inferred_cnt == sys.maxsize:\n self.inferred_request = [0] * self.num_requests\n self.heap_request = []\n self.inferred_cnt = 0\n heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))\n\n cnt, dev = heapq.heappop(self.heap_request)\n\n if self.exec_net.requests[dev].wait(0) == 0:\n self.exec_net.requests[dev].wait(-1)\n out = self.exec_net.requests[dev].outputs[\"detection_out\"].flatten()\n self.results.put([out])\n self.inferred_request[dev] = 0\n else:\n heapq.heappush(self.heap_request, (cnt, dev))\n\n except:\n import traceback\n traceback.print_exc()\n\n\ndef inferencer(results, frameBuffer, 
ssd_detection_mode, face_detection_mode, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm):\n\n # Init infer threads\n threads = []\n for devid in range(number_of_ncs):\n thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm),))\n thworker.start()\n threads.append(thworker)\n\n for th in threads:\n th.join()\n\n\ndef overlay_on_image(frames, object_infos, LABELS, camera_mode, background_transparent_mode, background_img, depth_scale=1.0, align=None):\n\n try:\n\n # 0:=RealSense Mode, 1:=USB Camera Mode\n\n color_image = frames\n\n if isinstance(object_infos, type(None)):\n # 0:= No background transparent, 1:= Background transparent\n if background_transparent_mode == 0:\n return color_image\n elif background_transparent_mode == 1:\n return background_img\n\n # Show images\n height = color_image.shape[0]\n width = color_image.shape[1]\n entire_pixel = height * width\n occupancy_threshold = 0.9\n\n if background_transparent_mode == 0:\n img_cp = color_image.copy()\n elif background_transparent_mode == 1:\n img_cp = background_img.copy()\n\n for (object_info, LABEL) in zip(object_infos, LABELS):\n\n drawing_initial_flag = True\n\n for box_index in range(100):\n if object_info[box_index + 1] == 0.0:\n break\n base_index = box_index * 7\n if (not np.isfinite(object_info[base_index]) or\n not np.isfinite(object_info[base_index + 1]) or\n not np.isfinite(object_info[base_index + 2]) or\n not np.isfinite(object_info[base_index + 3]) or\n not np.isfinite(object_info[base_index + 4]) or\n not np.isfinite(object_info[base_index + 5]) or\n not np.isfinite(object_info[base_index + 6])):\n continue\n\n x1 = max(0, int(object_info[base_index + 3] * height))\n y1 = max(0, int(object_info[base_index + 4] * width))\n x2 = min(height, int(object_info[base_index + 5] * height))\n y2 = min(width, int(object_info[base_index + 6] * width))\n\n object_info_overlay = object_info[base_index:base_index + 7]\n\n # 0:= No background transparent, 1:= Background transparent\n if background_transparent_mode == 0:\n min_score_percent = 60\n elif background_transparent_mode == 1:\n min_score_percent = 20\n\n source_image_width = width\n source_image_height = height\n\n base_index = 0\n class_id = object_info_overlay[base_index + 1]\n percentage = int(object_info_overlay[base_index + 2] * 100)\n if (percentage <= min_score_percent):\n continue\n\n box_left = int(object_info_overlay[base_index + 3] * source_image_width)\n box_top = int(object_info_overlay[base_index + 4] * source_image_height)\n box_right = int(object_info_overlay[base_index + 5] * source_image_width)\n box_bottom = int(object_info_overlay[base_index + 6] * source_image_height)\n\n # 0:=RealSense Mode, 1:=USB Camera Mode\n label_text = LABEL[int(class_id)] + \" (\" + str(percentage) + \"%)\"\n\n box_color = (255, 128, 0)\n box_thickness = 1\n cv2.rectangle(img_cp, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)\n label_background_color = (125, 175, 75)\n label_text_color = (255, 255, 255)\n label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]\n label_left = box_left\n label_top = box_top - label_size[1]\n if (label_top < 1):\n label_top = 1\n label_right = label_left + label_size[0]\n label_bottom = label_top + label_size[1]\n cv2.rectangle(img_cp, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1), label_background_color, -1)\n cv2.putText(img_cp, label_text, 
(label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)\n\n \n cv2.putText(img_cp, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)\n cv2.putText(img_cp, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)\n return img_cp\n\n except:\n import traceback\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n\n print ('__main__')\n parser = argparse.ArgumentParser()\n parser.add_argument('-mod','--mode',dest='camera_mode',type=int,default=0,help='Camera Mode. (0:=RealSense Mode, 1:=USB Camera Mode. Defalut=0)')\n parser.add_argument('-wd','--width',dest='camera_width',type=int,default=320,help='Width of the frames in the video stream. (USB Camera Mode Only. Default=320)')\n parser.add_argument('-ht','--height',dest='camera_height',type=int,default=240,help='Height of the frames in the video stream. (USB Camera Mode Only. Default=240)')\n parser.add_argument('-tp','--transparent',dest='background_transparent_mode',type=int,default=0,help='TransparentMode. (RealSense Mode Only. 0:=No background transparent, 1:=Background transparent)')\n parser.add_argument('-sd','--ssddetection',dest='ssd_detection_mode',type=int,default=1,help='[Future functions] SSDDetectionMode. (0:=Disabled, 1:=Enabled Default=1)')\n parser.add_argument('-fd','--facedetection',dest='face_detection_mode',type=int,default=0,help='[Future functions] FaceDetectionMode. (0:=Disabled, 1:=Full, 2:=Short Default=0)')\n parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')\n parser.add_argument('-vidfps','--fpsofvideo',dest='fps_of_video',type=int,default=30,help='FPS of Video. (USB Camera Mode Only. Default=30)')\n parser.add_argument('-skpfrm','--skipframe',dest='number_of_frame_skip',type=int,default=7,help='Number of frame skip. (RealSense Mode Only. Default=7)')\n\n args = parser.parse_args()\n print ('past args')\n camera_mode = args.camera_mode\n camera_width = args.camera_width\n camera_height = args.camera_height\n background_transparent_mode = args.background_transparent_mode\n ssd_detection_mode = args.ssd_detection_mode\n face_detection_mode = args.face_detection_mode\n number_of_ncs = args.number_of_ncs\n vidfps = args.fps_of_video\n skpfrm = args.number_of_frame_skip\n\n # 0:=RealSense Mode, 1:=USB Camera Mode\n if camera_mode != 0 and camera_mode != 1:\n print(\"Camera Mode Error!! 
\" + str(camera_mode))\n sys.exit(0)\n\n if camera_mode != 0 and background_transparent_mode == 1:\n background_transparent_mode = 0\n\n if background_transparent_mode == 1:\n background_img = np.zeros((camera_height, camera_width, 3), dtype=np.uint8)\n\n if face_detection_mode != 0:\n ssd_detection_mode = 0\n\n if ssd_detection_mode == 0 and face_detection_mode != 0:\n del(LABELS[0])\n\n try:\n\n mp.set_start_method('forkserver')\n frameBuffer = mp.Queue(10)\n results = mp.Queue()\n\n # Start streaming\n p = mp.Process(target=camThread,\n args=(LABELS, results, frameBuffer, camera_mode, camera_width, camera_height, background_transparent_mode, background_img, vidfps),\n daemon=True)\n p.start()\n processes.append(p)\n\n # Start detection MultiStick\n # Activation of inferencer\n p = mp.Process(target=inferencer,\n args=(results, frameBuffer, ssd_detection_mode, face_detection_mode, camera_mode, camera_width, camera_height, number_of_ncs, vidfps, skpfrm),\n daemon=True)\n p.start()\n processes.append(p)\n\n while True:\n sleep(1)\n\n except:\n import traceback\n traceback.print_exc()\n finally:\n for p in range(len(processes)):\n processes[p].terminate()\n\n print(\"\\n\\nFinished\\n\\n\")\n"
] | [
[
"numpy.isfinite",
"numpy.zeros"
]
] |
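The `main.py` row above builds a two-process pipeline: a camera process pushes frames into a bounded `multiprocessing.Queue` (dropping the oldest frame when the buffer is full) while inference workers pull frames and push detections back through a second queue. Below is a minimal, self-contained sketch of that hand-off pattern only; `produce_frames`, `consume_frames`, the string "detections for ..." and the `None` sentinels are illustrative additions, not part of the original file.

```python
import multiprocessing as mp

def produce_frames(frame_buffer, n_frames=20):
    # Camera-side loop: drop the oldest frame when the bounded queue is full,
    # mirroring `if frameBuffer.full(): frameBuffer.get()` in main.py.
    for i in range(n_frames):
        if frame_buffer.full():
            frame_buffer.get()
        frame_buffer.put("frame-%d" % i)
    frame_buffer.put(None)  # sentinel so the worker can stop (added for this sketch)

def consume_frames(frame_buffer, results):
    # Inference-side loop: a stand-in for the NcsWorker prediction call.
    while True:
        frame = frame_buffer.get()
        if frame is None:
            break
        results.put("detections for %s" % frame)
    results.put(None)  # tell the main process we are done

if __name__ == "__main__":
    frame_buffer = mp.Queue(10)   # bounded, like mp.Queue(10) in main.py
    results = mp.Queue()
    worker = mp.Process(target=consume_frames, args=(frame_buffer, results), daemon=True)
    worker.start()
    produce_frames(frame_buffer)
    while True:                   # drain results until the worker's sentinel arrives
        item = results.get()
        if item is None:
            break
        print(item)
    worker.join()
```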
fojor/object-cut | [
"2e9102ef7d21e056110a94931a91a75ae6a2114a"
] | [
"inference/src/utils/run.py"
] | [
"import time\nimport warnings\nimport cv2\nimport numpy as np\nimport torch\n\nfrom PIL import Image, ImageOps\nfrom torch.autograd import Variable\nfrom scipy import ndimage\nfrom torchvision import transforms\n\nfrom src.utils.data_loader import RescaleT, ToTensorLab\nfrom src.utils import log\n\n\ndef _load_img(image):\n \"\"\"\n Create DataLoader instance form input path list.\n :param image: image.\n :return: DataLoader instance.\n \"\"\"\n label_3 = np.zeros(image.shape)\n\n label = np.zeros(label_3.shape[0:2])\n if 3 == len(label_3.shape):\n label = label_3[:, :, 0]\n elif 2 == len(label_3.shape):\n label = label_3\n\n if 3 == len(image.shape) and 2 == len(label.shape):\n label = label[:, :, np.newaxis]\n elif 2 == len(image.shape) and 2 == len(label.shape):\n image = image[:, :, np.newaxis]\n label = label[:, :, np.newaxis]\n sample = dict(image=image, label=label)\n transform = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])\n sample = transform(sample)\n return sample\n\n\ndef define_model(model, model_path, gpu):\n \"\"\"\n Define model given some parameters.\n :param model: Model enumeration.\n :param model_path: Model file path.\n :param gpu: If GPU is available or not.\n :return: Model instance.\n \"\"\"\n net = model.value()\n if gpu:\n net.load_state_dict(torch.load(model_path))\n if torch.cuda.is_available():\n net.cuda()\n else:\n net.load_state_dict(torch.load(model_path, map_location='cpu'))\n net.eval()\n return net\n\n\n# noinspection PyUnresolvedReferences\ndef _normalize_prediction(prediction):\n \"\"\"\n Normalize the predicted SOD probability map.\n :param prediction: Model prediction.\n :return: Prediction normalized.\n \"\"\"\n maximum = torch.max(prediction)\n minimum = torch.min(prediction)\n prediction_normalized = (prediction - minimum) / (maximum - minimum)\n return prediction_normalized\n\n\ndef unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):\n \"\"\"\n Return a sharpened version of the image, using an unsharp mask.\n :param image: Input image.\n :param kernel_size: Size of the kernel use for the morphology operation.\n :param sigma: Parameter used for the Gaussian Blur.\n :param amount: Parameter to control sharpening.\n :param threshold: Parameter to control sharpening.\n \"\"\"\n blurred = cv2.GaussianBlur(image, kernel_size, sigma)\n sharpened = float(amount + 1) * image - float(amount) * blurred\n sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))\n sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))\n sharpened = sharpened.round().astype(np.uint8)\n if threshold > 0:\n low_contrast_mask = np.absolute(image - blurred) < threshold\n np.copyto(sharpened, image, where=low_contrast_mask)\n return sharpened\n\n\n# noinspection PyArgumentList\[email protected]_grad()\nasync def run(net, image, to_remove, color_removal):\n \"\"\"\n Run inference using U^2-Net model.\n :param net: model loaded\n :param image: Input image\n :param to_remove: Element to remove the input image result\n :param color_removal: Color from the removed or erased part\n :return: The image processed.\n \"\"\"\n image_original = Image.fromarray(np.uint8(image))\n warnings.simplefilter('ignore', UserWarning)\n sample = _load_img(image)\n inputs_test = sample['image'].unsqueeze(0)\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n # Inference\n log.info('Starting inference')\n try:\n start_time = time.time()\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = 
Variable(inputs_test)\n # Inference\n d1 = net(inputs_test)[0]\n # Normalize\n prediction = d1[:, 0, :, :]\n prediction = _normalize_prediction(prediction)\n\n prediction = prediction.squeeze()\n prediction = prediction.cpu().data.numpy()\n prediction = prediction * 255\n\n # threshold mask\n idx = prediction >= 5\n prediction[idx] = 255\n idx = prediction < 5\n prediction[idx] = 0\n\n # Sharpening algorithm\n prediction = cv2.erode(prediction, np.ones((3, 3), np.uint8), iterations=1)\n prediction = ndimage.gaussian_filter(prediction, sigma=(2, 2), order=0)\n prediction = unsharp_mask(prediction, amount=15.0)\n\n # Put alpha\n prediction = cv2.resize(prediction, dsize=image_original.size, interpolation=cv2.INTER_LANCZOS4)\n mask = Image.fromarray(prediction).convert('L')\n if to_remove == 'foreground':\n mask = ImageOps.invert(mask)\n if color_removal == 'white':\n background = Image.new('RGB', mask.size, (255, 255, 255))\n else:\n background = Image.new('RGBA', mask.size, (255, 255, 255, 0))\n\n # Generate output image with the mask\n output_image = Image.composite(image_original, background, mask)\n output_image = output_image.resize(\n (image_original.width, image_original.height), resample=Image.LANCZOS\n )\n\n # Clean\n del d1\n\n total_time = (time.time() - start_time) * 1000.0\n log.info('{:.2f}ms'.format(total_time))\n return output_image, None\n\n except Exception as e:\n error_message = 'Error on request: [{}]'.format(e)\n log.error(error_message)\n log.exception(e)\n return None, error_message\n"
] | [
[
"numpy.uint8",
"numpy.copyto",
"torch.min",
"numpy.zeros",
"torch.max",
"torch.autograd.Variable",
"torch.no_grad",
"numpy.ones",
"scipy.ndimage.gaussian_filter",
"torch.cuda.is_available",
"torch.load",
"numpy.absolute"
]
] |
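The object-cut row's `run.py` turns the raw U^2-Net output into an alpha mask: min-max normalise, scale to 0-255, threshold, then `Image.composite` the original image against a plain background. The NumPy/PIL sketch below covers only those masking steps (the erosion, Gaussian filter and unsharp-mask stages are omitted); `apply_mask`, the small epsilon in the normalisation, and the random test data are assumptions made for this illustration.

```python
import numpy as np
from PIL import Image, ImageOps

def apply_mask(image_rgb, prediction, to_remove="background"):
    # Composite `image_rgb` (H, W, 3 uint8) against a white background using a
    # saliency map `prediction` (H, W float), following the masking steps in run.py.
    # Min-max normalise (the +1e-8 guards against a flat map in this toy example).
    pred = (prediction - prediction.min()) / (prediction.max() - prediction.min() + 1e-8)
    pred = (pred * 255).astype(np.uint8)
    # Binarise with the same low threshold used in run.py (>= 5 -> keep).
    pred = np.where(pred >= 5, 255, 0).astype(np.uint8)

    mask = Image.fromarray(pred).convert("L")
    if to_remove == "foreground":
        mask = ImageOps.invert(mask)
    background = Image.new("RGB", mask.size, (255, 255, 255))
    return Image.composite(Image.fromarray(image_rgb), background, mask)

# Toy usage; in the real pipeline `prediction` comes from the U^2-Net model.
img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
pred = np.random.rand(64, 64)
out = apply_mask(img, pred)
print(out.size, out.mode)
```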
TiankunZhou/dxtbx | [
"9a45d44ccc78dae7b4a33bd938df67d1bac56867"
] | [
"format/FormatHDF5EigerNearlyNexus.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport sys\nimport uuid\n\nimport h5py\nimport numpy as np\n\nfrom iotbx.detectors.eiger import EIGERImage\nfrom scitbx import matrix\n\nfrom dxtbx.format.FormatHDF5 import FormatHDF5\nfrom dxtbx.format.FormatPilatusHelpers import determine_eiger_mask\nfrom dxtbx.format.FormatPilatusHelpers import get_vendortype_eiger as gv\nfrom dxtbx.format.nexus import (\n BeamFactory,\n DataFactory,\n DataFactoryCache,\n DetectorFactory,\n GoniometerFactory,\n MaskFactory,\n NXdata,\n NXmxReader,\n generate_scan_model,\n)\n\n\ndef find_entries(nx_file):\n \"\"\"\n Find NXmx entries\n \"\"\"\n if \"entry\" in nx_file:\n entry = nx_file[\"entry\"]\n if \"NX_class\" in entry.attrs:\n if entry.attrs[\"NX_class\"] == np.string_(\"NXentry\"):\n if \"definition\" not in entry:\n return entry\n return None\n\n\ndef is_eiger_nearly_nexus_file(filename):\n \"\"\"\n A hacky function to check if this is an EIGER-flavoured nexus file\n \"\"\"\n # Get the file handle\n with h5py.File(filename, \"r\") as handle:\n # Find the NXmx entries\n entry = find_entries(handle)\n if entry is not None:\n try:\n return (\n np.string_(\"dectris eiger\")\n in entry[\"instrument\"][\"detector\"][\"description\"][()].lower()\n )\n except KeyError:\n pass\n return False\n\n\nclass EigerNXmxFixer(object):\n \"\"\"\n A hacky class to read an NXmx file\n \"\"\"\n\n def __init__(self, input_filename, memory_mapped_name):\n # Copy the master file to the in memory handle\n handle_orig = h5py.File(input_filename, \"r\")\n handle = h5py.File(\n name=memory_mapped_name, driver=\"core\", backing_store=False, mode=\"w\"\n )\n handle_orig.copy(\"entry\", handle)\n\n # Add some simple datasets\n def create_scalar(handle, path, dtype, value):\n dataset = handle.create_dataset(path, (), dtype=dtype)\n dataset[()] = value\n\n # Add NXmx definition\n create_scalar(handle[\"entry\"], \"definition\", \"S4\", np.string_(\"NXmx\"))\n\n # Add saturation value\n try:\n create_scalar(\n handle[\"entry/instrument/detector\"],\n \"saturation_value\",\n \"int32\",\n handle[\n \"/entry/instrument/detector/detectorSpecific/countrate_correction_count_cutoff\"\n ],\n )\n except Exception:\n create_scalar(\n handle[\"entry/instrument/detector\"],\n \"saturation_value\",\n \"int32\",\n handle[\n \"entry/instrument/detector/detectorSpecific/detectorModule_000/countrate_correction_count_cutoff\"\n ],\n )\n\n # Add detector type\n create_scalar(\n handle[\"entry/instrument/detector\"], \"type\", \"S5\", np.string_(\"PIXEL\")\n )\n\n # Move the beam\n # print \"Copying /entry/instrument/beam to /entry/sample/beam\"\n handle.copy(\"/entry/instrument/beam\", \"/entry/sample/beam\")\n\n # Create detector module\n module_path = \"/entry/instrument/detector/module\"\n # print \"Creating detector module %s\" % (module_path)\n group = handle.create_group(module_path)\n group.attrs[\"NX_class\"] = np.string_(\"NXdetector_module\")\n\n # Add a module index\n create_scalar(group, \"module_index\", \"int64\", 0)\n\n # Create detector data origin\n dataset = group.create_dataset(\"data_origin\", (2,), dtype=\"int32\")\n dataset[0] = 0\n dataset[1] = 0\n\n # cope with badly structured chunk information i.e. 
many more data\n # entries than there are in real life...\n delete = []\n handle_orig_entry_properties = {}\n self.data_factory_cache = {}\n for k in sorted(handle_orig[\"/entry/data\"]):\n try:\n handle_orig_entry = handle_orig[\"/entry/data/%s\" % k]\n shape = handle_orig_entry.shape\n except KeyError:\n delete.append(\"/entry/data/%s\" % k)\n continue\n handle_orig_entry_properties[k] = {\n \"shape\": shape,\n \"length\": len(handle_orig_entry),\n \"filename\": handle_orig_entry.file.filename,\n }\n self.data_factory_cache[k] = DataFactoryCache(\n shape=shape,\n ndim=handle_orig_entry.ndim,\n filename=handle_orig_entry.file.filename,\n is_virtual=False,\n )\n for d in delete:\n del handle[d]\n\n # Create detector data size\n dataset = group.create_dataset(\"data_size\", (2,), dtype=\"int32\")\n dataset[0] = handle_orig_entry_properties[\"data_000001\"][\"shape\"][2]\n dataset[1] = handle_orig_entry_properties[\"data_000001\"][\"shape\"][1]\n\n depends_on = \"/entry/instrument/detector/transformations/translation\"\n\n # Add fast_pixel_size dataset\n # print \"Using /entry/instrument/detector/geometry/orientation/value as fast/slow pixel directions\"\n fast_axis = handle[\"/entry/instrument/detector/geometry/orientation/value\"][0:3]\n fast_axis = [\n fast_axis[0],\n fast_axis[1],\n -fast_axis[2],\n ] # swap Z axis to align with Dectris/NeXus documentation\n slow_axis = handle[\"/entry/instrument/detector/geometry/orientation/value\"][3:6]\n slow_axis = [\n slow_axis[0],\n slow_axis[1],\n -slow_axis[2],\n ] # swap Z axis to align with Dectris/NeXus documentation\n create_scalar(\n group,\n \"fast_pixel_direction\",\n \"float32\",\n handle[\"/entry/instrument/detector/x_pixel_size\"][()],\n )\n group[\"fast_pixel_direction\"].attrs[\"transformation_type\"] = np.string_(\n \"translation\"\n )\n group[\"fast_pixel_direction\"].attrs[\"vector\"] = fast_axis\n group[\"fast_pixel_direction\"].attrs[\"offset\"] = (0, 0, 0)\n group[\"fast_pixel_direction\"].attrs[\"units\"] = np.string_(\"m\")\n group[\"fast_pixel_direction\"].attrs[\"depends_on\"] = np.string_(depends_on)\n\n # Add slow_pixel_size dataset\n create_scalar(\n group,\n \"slow_pixel_direction\",\n \"float32\",\n handle[\"/entry/instrument/detector/y_pixel_size\"][()],\n )\n group[\"slow_pixel_direction\"].attrs[\"transformation_type\"] = np.string_(\n \"translation\"\n )\n group[\"slow_pixel_direction\"].attrs[\"vector\"] = slow_axis\n group[\"slow_pixel_direction\"].attrs[\"offset\"] = (0, 0, 0)\n group[\"slow_pixel_direction\"].attrs[\"units\"] = np.string_(\"m\")\n group[\"slow_pixel_direction\"].attrs[\"depends_on\"] = np.string_(depends_on)\n\n # Add module offset dataset\n # print \"Set module offset to be zero relative to detector\"\n create_scalar(group, \"module_offset\", \"float32\", 0)\n group[\"module_offset\"].attrs[\"transformation_type\"] = np.string_(\"translation\")\n group[\"module_offset\"].attrs[\"vector\"] = (0, 0, 0)\n group[\"module_offset\"].attrs[\"offset\"] = (0, 0, 0)\n group[\"module_offset\"].attrs[\"units\"] = np.string_(\"m\")\n group[\"module_offset\"].attrs[\"depends_on\"] = np.string_(depends_on)\n\n # Create detector depends_on\n create_scalar(\n handle[\"/entry/instrument/detector\"],\n \"depends_on\",\n \"S%d\" % len(depends_on),\n np.string_(depends_on),\n )\n\n # Add detector position\n # print \"Using /entry/instrument/detector/geometry/translation/distances as transformation\"\n detector_offset_vector = handle[\n \"/entry/instrument/detector/geometry/translation/distances\"\n ][()]\n # swap 
Z axis to align with Dectris/NeXus documentation\n detector_offset_vector = matrix.col(\n (\n detector_offset_vector[0],\n detector_offset_vector[1],\n -detector_offset_vector[2],\n )\n )\n group = handle.create_group(\"/entry/instrument/detector/transformations\")\n group.attrs[\"NX_class\"] = np.string_(\"NXtransformations\")\n create_scalar(group, \"translation\", \"float32\", detector_offset_vector.length())\n group[\"translation\"].attrs[\"transformation_type\"] = np.string_(\"translation\")\n if detector_offset_vector.length() > 0:\n group[\"translation\"].attrs[\"vector\"] = detector_offset_vector.normalize()\n else:\n group[\"translation\"].attrs[\"vector\"] = detector_offset_vector\n group[\"translation\"].attrs[\"offset\"] = 0\n group[\"translation\"].attrs[\"units\"] = np.string_(\"m\")\n group[\"translation\"].attrs[\"depends_on\"] = np.string_(\".\")\n\n # Create goniometer transformations if not found\n if \"/entry/sample/transformations\" not in handle:\n # print \"Creating group /entry/sample/transformation\"\n group = handle.create_group(\"/entry/sample/transformations\")\n group.attrs[\"NX_class\"] = np.string_(\"NXtransformations\")\n else:\n group = handle[\"/entry/sample/transformations\"]\n\n # check for incomplete omega definitions dirty hack...\n if \"omega\" in group:\n try:\n group[\"omega\"][()]\n except AttributeError:\n del group[\"omega\"]\n\n if \"omega\" not in group:\n # In here assume goniometer axis is 1,0,0 unless (i) specified somewhere\n # we can know or (ii) a known special case. For (i) for this instrument\n # listed here this is at\n #\n # /entry/sample/transformations/omega->vector\n #\n # which will join the special case list once this is properly resolved\n # as this corrently returns 0, -1, 0 so needs transforming...\n #\n # special cases:\n # E-32-0105 - Max IV, vertical axis\n\n key = handle[\"/entry/instrument/detector/detector_number\"][()]\n default_axis = {b\"E-32-0105\": (0, 1, 0)}.get(key, (-1, 0, 0))\n\n num_images = 0\n for name in sorted(handle[\"/entry/data\"]):\n num_images += handle_orig_entry_properties[name][\"length\"]\n dataset = group.create_dataset(\"omega\", (num_images,), dtype=\"float32\")\n dataset.attrs[\"units\"] = np.string_(\"degree\")\n dataset.attrs[\"transformation_type\"] = np.string_(\"rotation\")\n dataset.attrs[\"vector\"] = default_axis\n dataset.attrs[\"offset\"] = 0\n dataset.attrs[\"depends_on\"] = np.string_(\".\")\n omega_range_average = handle[\n \"/entry/sample/goniometer/omega_range_average\"\n ][()]\n omega_range_average = int(omega_range_average * 100 + 0.5) / 100.0\n for i in range(num_images):\n angle = omega_range_average * i\n dataset[i] = angle\n else:\n dataset = group[\"omega\"]\n\n if \"depends_on\" not in handle[\"/entry/sample\"]:\n # Create sample depends_on\n create_scalar(\n handle[\"/entry/sample\"],\n \"depends_on\",\n \"S%d\" % len(dataset.name),\n np.string_(dataset.name),\n )\n\n # Change relative paths to absolute paths\n for name in handle[\"/entry/data\"]:\n del handle[\"entry/data\"][name]\n filename = handle_orig_entry_properties[name][\"filename\"]\n handle[\"entry/data\"][name] = h5py.ExternalLink(filename, \"entry/data/data\")\n handle[\"entry/data\"][\"_filename_\" + name] = filename # Store file names\n\n self.handle = handle\n self.handle_orig = handle_orig\n\n\nclass FormatHDF5EigerNearlyNexus(FormatHDF5):\n @staticmethod\n def understand(image_file):\n try:\n return is_eiger_nearly_nexus_file(image_file)\n except IOError:\n return False\n\n def _start(self):\n # Read 
the file structure\n temp_file = \"tmp_master_%s.nxs\" % uuid.uuid1().hex\n fixer = EigerNXmxFixer(self._image_file, temp_file)\n reader = NXmxReader(handle=fixer.handle)\n\n # Only support 1 set of models at the moment\n assert len(reader.entries) == 1, \"Currently only supports 1 NXmx entry\"\n assert len(reader.entries[0].data) == 1, \"Currently only supports 1 NXdata\"\n assert (\n len(reader.entries[0].instruments) == 1\n ), \"Currently only supports 1 NXinstrument\"\n assert len(reader.entries[0].samples) == 1, \"Currently only supports 1 NXsample\"\n assert (\n len(reader.entries[0].instruments[0].detectors) == 1\n ), \"Currently only supports 1 NXdetector\"\n assert (\n len(reader.entries[0].instruments[0].detectors[0].modules) == 1\n ), \"Currently only supports 1 NXdetector_module\"\n assert (\n len(reader.entries[0].samples[0].beams) == 1\n or len(reader.entries[0].instruments[0].beams) == 1\n ), \"Currently only supports 1 NXbeam\"\n\n # Get the NXmx model objects\n entry = reader.entries[0]\n self.instrument = instrument = entry.instruments[0]\n detector = instrument.detectors[0]\n sample = entry.samples[0]\n beam = sample.beams[0] if sample.beams else instrument.beams[0]\n\n # Use data from original master file\n data = NXdata(fixer.handle_orig[entry.data[0].handle.name])\n self._raw_data = DataFactory(data, cached_information=fixer.data_factory_cache)\n\n # Construct the models\n self._beam_factory = BeamFactory(beam)\n self._beam_factory.load_model(0)\n self._detector_model = DetectorFactory(\n detector, self._beam_factory.model, shape=self._raw_data.shape()\n ).model\n\n # Override the minimum trusted value - for Eiger should be -1\n for panel in self._detector_model:\n trusted = panel.get_trusted_range()\n panel.set_trusted_range((-1, trusted[1]))\n\n self._goniometer_model = GoniometerFactory(sample).model\n self._scan_model = generate_scan_model(sample, detector)\n\n # update model for masking Eiger detectors\n for f0, f1, s0, s1 in determine_eiger_mask(self._detector_model):\n self._detector_model[0].add_mask(f0 - 1, s0 - 1, f1, s1)\n\n def _end(self):\n return\n\n def _goniometer(self):\n return self._goniometer_model\n\n def _detector(self):\n return self._detector_model\n\n def _beam(self, index=None):\n self._beam_model, _ = self._beam_factory.read_models(index)\n return self._beam_model\n\n def _scan(self):\n return self._scan_model\n\n def get_goniometer(self, index=None):\n return self._goniometer()\n\n def get_detector(self, index=None):\n return self._detector()\n\n def get_beam(self, index=None):\n return self._beam(index)\n\n def get_scan(self, index=None):\n if index is None:\n return self._scan()\n return self._scan()[index]\n\n def get_raw_data(self, index):\n return self._raw_data[index]\n\n def get_static_mask(self, index=None, goniometer=None):\n return MaskFactory(self.instrument.detectors, index).mask\n\n def get_num_images(self):\n scan = self._scan()\n if isinstance(scan, list):\n return sum(s.get_num_images() for s in scan)\n return scan.get_num_images()\n\n def get_image_file(self, index=None):\n return self._image_file\n\n def detectorbase_start(self, index=0):\n self.detectorbase = EIGERImage(self._image_file, index=index)\n self.detectorbase.readHeader(dxtbx_instance=self)\n\n def model_get_raw_data(ptr, index):\n return self.get_raw_data(index)\n\n self.detectorbase.get_raw_data_callback = model_get_raw_data\n\n def get_detectorbase(self, index=0):\n self.detectorbase_start(index)\n return self.detectorbase\n\n def get_vendortype(self):\n 
return gv(self.get_detector())\n\n\nif __name__ == \"__main__\":\n f = FormatHDF5EigerNearlyNexus(sys.argv[1])\n for i in range(10):\n print(f.get_raw_data(i))\n"
] | [
[
"numpy.string_"
]
] |
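The dxtbx row's `EigerNXmxFixer` never rewrites the Eiger master file on disk: it copies `/entry` into an in-memory HDF5 file (h5py's "core" driver with `backing_store=False`) and then adds the datasets and attributes a proper NXmx file would carry. A small self-contained sketch of that copy-and-patch step is shown below; the toy source file, `patched_copy`, and the use of plain `bytes` instead of `np.string_` are assumptions for the sake of a runnable example.

```python
import h5py

def patched_copy(input_filename, memory_name="patched.nxs"):
    # Copy /entry from the on-disk master file into a purely in-memory HDF5 file
    # (driver="core", backing_store=False) and patch the copy, so the original
    # file is never modified -- the same trick EigerNXmxFixer uses.
    src = h5py.File(input_filename, "r")
    dst = h5py.File(memory_name, mode="w", driver="core", backing_store=False)
    src.copy("entry", dst)

    # Add a scalar dataset the way the fixer's create_scalar helper does.
    ds = dst["entry"].create_dataset("definition", (), dtype="S4")
    ds[()] = b"NXmx"
    return src, dst

# Build a tiny source file so the sketch is self-contained.
with h5py.File("toy_master.h5", "w") as f:
    f.create_group("entry").attrs["NX_class"] = b"NXentry"

src, dst = patched_copy("toy_master.h5")
print(dst["entry/definition"][()])  # b'NXmx'
```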
atward424/ASCVD_ML | [
"39404dd5f50a527576b91e8f53f5157f76382712"
] | [
"debug_permutation_importance.py"
] | [
"from __future__ import absolute_import\r\nfrom typing import Tuple, List, Callable, Any\r\n\r\nimport numpy as np # type: ignore\r\nfrom sklearn.utils import check_random_state # type: ignore\r\nimport matplotlib.pyplot as plt\r\n\r\nimport pickle\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\r\n\r\ndef iter_shuffled(X, columns_to_shuffle=None, pre_shuffle=False,\r\n random_state=None):\r\n \"\"\"\r\n Return an iterator of X matrices which have one or more columns shuffled.\r\n After each iteration yielded matrix is mutated inplace, so\r\n if you want to use multiple of them at the same time, make copies.\r\n\r\n ``columns_to_shuffle`` is a sequence of column numbers to shuffle.\r\n By default, all columns are shuffled once, i.e. columns_to_shuffle\r\n is ``range(X.shape[1])``.\r\n\r\n If ``pre_shuffle`` is True, a copy of ``X`` is shuffled once, and then\r\n result takes shuffled columns from this copy. If it is False,\r\n columns are shuffled on fly. ``pre_shuffle = True`` can be faster\r\n if there is a lot of columns, or if columns are used multiple times.\r\n \"\"\"\r\n rng = check_random_state(random_state)\r\n\r\n if columns_to_shuffle is None:\r\n columns_to_shuffle = range(X.shape[1])\r\n\r\n if pre_shuffle:\r\n X_shuffled = X.copy()\r\n rng.shuffle(X_shuffled)\r\n\r\n X_res = X.copy()\r\n for columns in columns_to_shuffle:\r\n if pre_shuffle:\r\n X_res[:, columns] = X_shuffled[:, columns]\r\n else:\r\n rng.shuffle(X_res[:, columns])\r\n yield X_res\r\n X_res[:, columns] = X[:, columns]\r\n\r\n\r\n\r\ndef get_score_importances(\r\n score_func, # type: Callable[[Any, Any], float]\r\n X,\r\n y,\r\n n_iter=5, # type: int\r\n columns_to_shuffle=None,\r\n random_state=None\r\n ):\r\n # type: (...) -> Tuple[float, List[np.ndarray]]\r\n \"\"\"\r\n Return ``(base_score, score_decreases)`` tuple with the base score and\r\n score decreases when a feature is not available.\r\n\r\n ``base_score`` is ``score_func(X, y)``; ``score_decreases``\r\n is a list of length ``n_iter`` with feature importance arrays\r\n (each array is of shape ``n_features``); feature importances are computed\r\n as score decrease when a feature is not available.\r\n\r\n ``n_iter`` iterations of the basic algorithm is done, each iteration\r\n starting from a different random seed.\r\n\r\n If you just want feature importances, you can take a mean of the result::\r\n\r\n import numpy as np\r\n from eli5.permutation_importance import get_score_importances\r\n\r\n base_score, score_decreases = get_score_importances(score_func, X, y)\r\n feature_importances = np.mean(score_decreases, axis=0)\r\n\r\n \"\"\"\r\n rng = check_random_state(random_state)\r\n base_score = score_func(X, y)\r\n scores_decreases = []\r\n for i in range(n_iter):\r\n scores_shuffled = _get_scores_shufled(\r\n score_func, X, y, columns_to_shuffle=columns_to_shuffle,\r\n random_state=rng\r\n )\r\n scores_decreases.append(-scores_shuffled + base_score)\r\n return base_score, scores_decreases\r\n\r\n\r\n\r\ndef _get_scores_shufled(score_func, X, y, columns_to_shuffle=None,\r\n random_state=None):\r\n Xs = iter_shuffled(X, columns_to_shuffle, random_state=random_state)\r\n return np.array([score_func(X_shuffled, y) for X_shuffled in Xs])\r\n\r\n\r\n# -*- coding: utf-8 -*-\r\nfrom functools import partial\r\nfrom typing import List\r\n\r\nimport numpy as np # type: ignore\r\nfrom sklearn.model_selection import check_cv # type: ignore\r\nfrom sklearn.utils.metaestimators import if_delegate_has_method # type: ignore\r\nfrom 
sklearn.utils import check_array, check_random_state # type: ignore\r\nfrom sklearn.base import ( # type: ignore\r\n BaseEstimator,\r\n MetaEstimatorMixin,\r\n clone,\r\n is_classifier\r\n)\r\nfrom sklearn.metrics.scorer import check_scoring # type: ignore\r\n\r\n# from eli5.permutation_importance import get_score_importances\r\n#from eli5.sklearn.utils import pandas_available\r\nimport pandas as pd # type: ignore\r\npandas_available = True\r\n\r\nCAVEATS_CV_NONE = \"\"\"\r\nFeature importances are computed on the same data as used for training, \r\ni.e. feature importances don't reflect importance of features for \r\ngeneralization.\r\n\"\"\"\r\n\r\nCAVEATS_CV = \"\"\"\r\nFeature importances are not computed for the final estimator; \r\nthey are computed for a sequence of estimators trained and evaluated \r\non train/test splits. So they tell you about importances of features \r\nfor generalization, but not feature importances of a particular trained model.\r\n\"\"\"\r\n\r\nCAVEATS_PREFIT = \"\"\"\r\nIf feature importances are computed on the same data as used for training, \r\nthey don't reflect importance of features for generalization. Use a held-out\r\ndataset if you want generalization feature importances.\r\n\"\"\"\r\n\r\n\r\nclass PermutationImportance(BaseEstimator, MetaEstimatorMixin):\r\n \"\"\"Meta-estimator which computes ``feature_importances_`` attribute\r\n based on permutation importance (also known as mean score decrease).\r\n\r\n :class:`~PermutationImportance` instance can be used instead of\r\n its wrapped estimator, as it exposes all estimator's common methods like\r\n ``predict``.\r\n\r\n There are 3 main modes of operation:\r\n\r\n 1. cv=\"prefit\" (pre-fit estimator is passed). You can call\r\n PermutationImportance.fit either with training data, or\r\n with a held-out dataset (in the latter case ``feature_importances_``\r\n would be importances of features for generalization). After the fitting\r\n ``feature_importances_`` attribute becomes available, but the estimator\r\n itself is not fit again. When cv=\"prefit\",\r\n :meth:`~PermutationImportance.fit` must be called\r\n directly, and :class:`~PermutationImportance` cannot be used with\r\n ``cross_val_score``, ``GridSearchCV`` and similar utilities that clone\r\n the estimator.\r\n 2. cv=None. In this case :meth:`~PermutationImportance.fit` method fits\r\n the estimator and computes feature importances on the same data, i.e.\r\n feature importances don't reflect importance of features for\r\n generalization.\r\n 3. all other ``cv`` values. :meth:`~PermutationImportance.fit` method\r\n fits the estimator, but instead of computing feature importances for\r\n the concrete estimator which is fit, importances are computed for\r\n a sequence of estimators trained and evaluated on train/test splits\r\n according to ``cv``, and then averaged. This is more resource-intensive\r\n (estimators are fit multiple times), and importances are not computed\r\n for the final estimator, but ``feature_importances_`` show importances\r\n of features for generalization.\r\n\r\n Mode (1) is most useful for inspecting an existing estimator; modes\r\n (2) and (3) can be also used for feature selection, e.g. together with\r\n sklearn's SelectFromModel or RFE.\r\n\r\n Currently :class:`~PermutationImportance` works with dense data.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n The base estimator. 
This can be both a fitted\r\n (if ``prefit`` is set to True) or a non-fitted estimator.\r\n\r\n scoring : string, callable or None, default=None\r\n Scoring function to use for computing feature importances.\r\n A string with scoring name (see scikit-learn docs) or\r\n a scorer callable object / function with signature\r\n ``scorer(estimator, X, y)``.\r\n If ``None``, the ``score`` method of the estimator is used.\r\n\r\n n_iter : int, default 5\r\n Number of random shuffle iterations. Decrease to improve speed,\r\n increase to get more precise estimates.\r\n\r\n random_state : integer or numpy.random.RandomState, optional\r\n random state\r\n\r\n cv : int, cross-validation generator, iterable or \"prefit\"\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to disable cross-validation and compute feature importances\r\n on the same data as used for training.\r\n - integer, to specify the number of folds.\r\n - An object to be used as a cross-validation generator.\r\n - An iterable yielding train/test splits.\r\n - \"prefit\" string constant (default).\r\n\r\n If \"prefit\" is passed, it is assumed that ``estimator`` has been\r\n fitted already and all data is used for computing feature importances.\r\n\r\n refit : bool\r\n Whether to fit the estimator on the whole data if cross-validation\r\n is used (default is True).\r\n\r\n Attributes\r\n ----------\r\n feature_importances_ : array\r\n Feature importances, computed as mean decrease of the score when\r\n a feature is permuted (i.e. becomes noise).\r\n\r\n feature_importances_std_ : array\r\n Standard deviations of feature importances.\r\n\r\n results_ : list of arrays\r\n A list of score decreases for all experiments.\r\n\r\n scores_ : array of float\r\n A list of base scores for all experiments (with no features permuted).\r\n\r\n estimator_ : an estimator\r\n The base estimator from which the :class:`~PermutationImportance`\r\n instance is built. This is stored only when a non-fitted estimator\r\n is passed to the :class:`~PermutationImportance`, i.e when ``cv`` is\r\n not \"prefit\".\r\n\r\n rng_ : numpy.random.RandomState\r\n random state\r\n \"\"\"\r\n def __init__(self, estimator, scoring=None, n_iter=5, random_state=None,\r\n cv='prefit', refit=True):\r\n # type: (...) -> None\r\n if isinstance(cv, str) and cv != \"prefit\":\r\n raise ValueError(\"Invalid cv value: {!r}\".format(cv))\r\n self.refit = refit\r\n self.estimator = estimator\r\n self.scoring = scoring\r\n self.n_iter = n_iter\r\n self.random_state = random_state\r\n self.cv = cv\r\n self.rng_ = check_random_state(random_state)\r\n\r\n def _wrap_scorer(self, base_scorer, pd_columns):\r\n def pd_scorer(model, X, y):\r\n X = pd.DataFrame(X, columns=pd_columns)\r\n return base_scorer(model, X, y)\r\n return pd_scorer\r\n\r\n def fit(self, X, y, groups=None, **fit_params):\r\n # type: (...) 
-> PermutationImportance\r\n \"\"\"Compute ``feature_importances_`` attribute and optionally\r\n fit the base estimator.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n The training input samples.\r\n\r\n y : array-like, shape (n_samples,)\r\n The target values (integers that correspond to classes in\r\n classification, real numbers in regression).\r\n\r\n groups : array-like, with shape (n_samples,), optional\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set.\r\n\r\n **fit_params : Other estimator specific parameters\r\n\r\n Returns\r\n -------\r\n self : object\r\n Returns self.\r\n \"\"\"\r\n self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)\r\n\r\n if pandas_available and isinstance(X, pd.DataFrame):\r\n self.scorer_ = self._wrap_scorer(self.scorer_, X.columns)\r\n\r\n if self.cv != \"prefit\" and self.refit:\r\n self.estimator_ = clone(self.estimator)\r\n self.estimator_.fit(X, y, **fit_params)\r\n\r\n X = check_array(X)\r\n\r\n if self.cv not in (None, \"prefit\"):\r\n si = self._cv_scores_importances(X, y, groups=groups, **fit_params)\r\n else:\r\n si = self._non_cv_scores_importances(X, y)\r\n scores, results = si\r\n self.scores_ = np.array(scores)\r\n self.results_ = results\r\n self.feature_importances_ = np.mean(results, axis=0)\r\n self.feature_importances_std_ = np.std(results, axis=0)\r\n return self\r\n\r\n\r\n def _cv_scores_importances(self, X, y, groups=None, **fit_params):\r\n assert self.cv is not None\r\n cv = check_cv(self.cv, y, is_classifier(self.estimator))\r\n feature_importances = [] # type: List\r\n base_scores = [] # type: List[float]\r\n for train, test in cv.split(X, y, groups):\r\n est = clone(self.estimator).fit(X[train], y[train], **fit_params)\r\n score_func = partial(self.scorer_, est)\r\n _base_score, _importances = self._get_score_importances(\r\n score_func, X[test], y[test])\r\n base_scores.extend([_base_score] * len(_importances))\r\n feature_importances.extend(_importances)\r\n return base_scores, feature_importances\r\n\r\n def _non_cv_scores_importances(self, X, y):\r\n score_func = partial(self.scorer_, self.wrapped_estimator_)\r\n base_score, importances = self._get_score_importances(score_func, X, y)\r\n return [base_score] * len(importances), importances\r\n\r\n def _get_score_importances(self, score_func, X, y):\r\n return get_score_importances(score_func, X, y, n_iter=self.n_iter,\r\n random_state=self.rng_)\r\n\r\n @property\r\n def caveats_(self):\r\n # type: () -> str\r\n if self.cv == 'prefit':\r\n return CAVEATS_PREFIT\r\n elif self.cv is None:\r\n return CAVEATS_CV_NONE\r\n return CAVEATS_CV\r\n\r\n # ============= Exposed methods of a wrapped estimator:\r\n\r\n @if_delegate_has_method(delegate='wrapped_estimator_')\r\n def score(self, X, y=None, *args, **kwargs):\r\n return self.wrapped_estimator_.score(X, y, *args, **kwargs)\r\n\r\n @if_delegate_has_method(delegate='wrapped_estimator_')\r\n def predict(self, X):\r\n return self.wrapped_estimator_.predict(X)\r\n\r\n @if_delegate_has_method(delegate='wrapped_estimator_')\r\n def predict_proba(self, X):\r\n return self.wrapped_estimator_.predict_proba(X)\r\n\r\n @if_delegate_has_method(delegate='wrapped_estimator_')\r\n def predict_log_proba(self, X):\r\n return self.wrapped_estimator_.predict_log_proba(X)\r\n\r\n @if_delegate_has_method(delegate='wrapped_estimator_')\r\n def decision_function(self, X):\r\n return self.wrapped_estimator_.decision_function(X)\r\n\r\n @property\r\n def 
wrapped_estimator_(self):\r\n if self.cv == \"prefit\" or not self.refit:\r\n return self.estimator\r\n return self.estimator_\r\n\r\n @property\r\n def _estimator_type(self):\r\n return self.estimator._estimator_type\r\n\r\n @property\r\n def classes_(self):\r\n return self.wrapped_estimator_.classes_\r\n\r\n\r\ndef get_column_inds_from_names(df_column_names, names_to_replace):\r\n replace_inds = []\r\n for n2r in names_to_replace:\r\n replace_inds.append([df_column_names.get_loc(c) for c in n2r])\r\n return(replace_inds)\r\n \r\n \r\ndef variable_importance_plot(feature_names, feat_importances, err=None, keep_top = None):\r\n \"\"\"\r\n Purpose\r\n ----------\r\n Prints bar chart detailing variable importance for CART model\r\n NOTE: feature_space list was created because the bar chart\r\n was transposed and index would be in incorrect order.\r\n\r\n Parameters\r\n ----------\r\n * importance: Array returned from feature_importances_ for CART\r\n models organized by dataframe index\r\n\r\n Returns:\r\n ----------\r\n Returns variable importance plot in descending order\r\n \"\"\"\r\n# index = np.arange(len(names_index))\r\n\r\n# importance_desc = sorted(importance)\r\n# feature_space = []\r\n\r\n# for i in range(indices.shape[0] - 1, -1, -1):\r\n# feature_space.append(names_index[indices[i]])\r\n fig, ax = plt.subplots(figsize=(7.5, 12))\r\n \r\n if err is None:\r\n err = np.zeros(len(feat_importances))\r\n feature_importances = pd.DataFrame([feat_importances, err], columns=feature_names)\r\n importances_df = feature_importances.sort_values(by=0, axis=1, ascending=True, inplace=False, kind='quicksort', na_position='last').T\r\n importances_df.columns = ['imps', 'err']\r\n if keep_top is not None:\r\n importances_df = importances_df.iloc[(-1*keep_top):]\r\n# ax.set_axis_bgcolor('#fafafa')\r\n ax.barh(importances_df.index,\r\n importances_df.imps,\r\n xerr=importances_df.err, \r\n alpha = 0.9, \r\n edgecolor = \"black\", \r\n zorder=3, \r\n color='lightblue'\r\n )\r\n# align=\"center\",\r\n# color = '#875FDB')\r\n# plt.yticks(index,\r\n# feature_space)\r\n\r\n# plt.ylim(-1, 30)\r\n# plt.xlim(0, max(importance_desc) + 0.01)\r\n ax.set_ylabel('Feature')\r\n\r\n fig.subplots_adjust(left=0.3)\r\n fig.tight_layout()\r\n return ax, fig\r\n\r\n#names_of_feats_all = []\r\n#for feat_group in feature_space.columns:\r\n# for feat_dict in PATIENT_FEATURES_CONFIG:\r\n# if feat_dict['name'] == feat_group:\r\n# names_of_feats_all.append(feat_dict['formatted_name'])\r\n# break\r\n\r\n\r\n\r\n\r\n#feat_list = 
[['agebl'],\r\n#['female'],\r\n#['race'],\r\n#['hdlchol'],\r\n#['totchol'],\r\n#['systolic'],\r\n#['t2d_history'],\r\n#['bp_antihtn'],\r\n#['cursmk_ever'],\r\n#['ldlchol'],\r\n#['diastolic'],\r\n#['wt'],\r\n#['ht'],\r\n#['medhousincome'],\r\n#['primarycarevsts'],\r\n#['otherservicevsts'],\r\n#['specialtycarevsts'],\r\n#['total_medications'],\r\n#['education5'],\r\n#['education3'],\r\n#['education4'],\r\n#['education6'],\r\n#['education1'],\r\n#['education2'],\r\n#['normal_tests'],\r\n#['abnormal_tests'],\r\n#['CCS_158'],\r\n#['CCS_98'],\r\n#['MONO_1'],\r\n#['CCS_5'],\r\n#['PSA_0'],\r\n#['LYMPH_1'],\r\n#['CCS_79'],\r\n#['MED_4799'],\r\n#['MED_3320'],\r\n#['MED_1630'],\r\n#['EOS_0'],\r\n#['CCS_102'],\r\n#['CCS_8'],\r\n#['MED_3615'],\r\n#['CCS_96'],\r\n#['MED_9646'],\r\n#['MED_6205'],\r\n#['CALCIUM_0'],\r\n#['MED_8672'],\r\n#['MED_6410'],\r\n#['EOS_1'],\r\n#['CCS_33'],\r\n#['BASO_0'],\r\n#['CCS_63'],\r\n#['GLU_1'],\r\n#['CCS_59'],\r\n#['GFR_1'],\r\n#['CRP_1'],\r\n#['CCS_51'],\r\n#['CCS_204'],\r\n#['CCS_95'],\r\n#['CCS_653'],\r\n#['CCS_64'],\r\n#['CCS_244'],\r\n#['CCS_97'],\r\n#['MED_3999'],\r\n#['U_ACR_1'],\r\n#['MED_8625'],\r\n#['K_0'],\r\n#['MED_4630'],\r\n#['U_PROT_1'],\r\n#['MED_4155'],\r\n#['BILI_0'],\r\n#['CCS_83'],\r\n#['BILI_1'],\r\n#['CCS_2'],\r\n#['MED_1220'],\r\n#['MED_0310'],\r\n#['MED_5940'],\r\n#['CCS_11'],\r\n#['CCS_660'],\r\n#['MED_9066'],\r\n#['CCS_104'],\r\n#['MED_3720'],\r\n#['MED_7710'],\r\n#['MED_4240'],\r\n#['CCS_115'],\r\n#['AST_0'],\r\n#['CCS_216'],\r\n#['MED_3760'],\r\n#['CCS_211'],\r\n#['MED_0700'],\r\n#['T4_1'],\r\n#['FIBRINOGEN_1'],\r\n#['BUN_1'],\r\n#['MED_8230'],\r\n#['CCS_152'],\r\n#['CCS_49'],\r\n#['CCS_50'],\r\n#['CCS_651'],\r\n#['CCS_199'],\r\n#['MED_3610'],\r\n#['CCS_99'],\r\n#['MED_4920'],\r\n#['MED_0199'],\r\n#['MED_4650'],\r\n#['Emphysema'],\r\n#['MED_3940'],\r\n#['MED_0230'],\r\n#['MED_9940'],\r\n#['MED_7813'],\r\n#['U_MICALB24_1']]\r\n#\r\n#feat_names = 
['agebl',\r\n#'female',\r\n#'race',\r\n#'hdlchol',\r\n#'totchol',\r\n#'systolic',\r\n#'t2d_history',\r\n#'bp_antihtn',\r\n#'cursmk_ever',\r\n#'ldlchol',\r\n#'diastolic',\r\n#'wt',\r\n#'ht',\r\n#'medhousincome',\r\n#'primarycarevsts',\r\n#'otherservicevsts',\r\n#'specialtycarevsts',\r\n#'total_medications',\r\n#'education5',\r\n#'education3',\r\n#'education4',\r\n#'education6',\r\n#'education1',\r\n#'education2',\r\n#'normal_tests',\r\n#'abnormal_tests',\r\n#'CCS_158',\r\n#'CCS_98',\r\n#'MONO_1',\r\n#'CCS_5',\r\n#'PSA_0',\r\n#'LYMPH_1',\r\n#'CCS_79',\r\n#'MED_4799',\r\n#'MED_3320',\r\n#'MED_1630',\r\n#'EOS_0',\r\n#'CCS_102',\r\n#'CCS_8',\r\n#'MED_3615',\r\n#'CCS_96',\r\n#'MED_9646',\r\n#'MED_6205',\r\n#'CALCIUM_0',\r\n#'MED_8672',\r\n#'MED_6410',\r\n#'EOS_1',\r\n#'CCS_33',\r\n#'BASO_0',\r\n#'CCS_63',\r\n#'GLU_1',\r\n#'CCS_59',\r\n#'GFR_1',\r\n#'CRP_1',\r\n#'CCS_51',\r\n#'CCS_204',\r\n#'CCS_95',\r\n#'CCS_653',\r\n#'CCS_64',\r\n#'CCS_244',\r\n#'CCS_97',\r\n#'MED_3999',\r\n#'U_ACR_1',\r\n#'MED_8625',\r\n#'K_0',\r\n#'MED_4630',\r\n#'U_PROT_1',\r\n#'MED_4155',\r\n#'BILI_0',\r\n#'CCS_83',\r\n#'BILI_1',\r\n#'CCS_2',\r\n#'MED_1220',\r\n#'MED_0310',\r\n#'MED_5940',\r\n#'CCS_11',\r\n#'CCS_660',\r\n#'MED_9066',\r\n#'CCS_104',\r\n#'MED_3720',\r\n#'MED_7710',\r\n#'MED_4240',\r\n#'CCS_115',\r\n#'AST_0',\r\n#'CCS_216',\r\n#'MED_3760',\r\n#'CCS_211',\r\n#'MED_0700',\r\n#'T4_1',\r\n#'FIBRINOGEN_1',\r\n#'BUN_1',\r\n#'MED_8230',\r\n#'CCS_152',\r\n#'CCS_49',\r\n#'CCS_50',\r\n#'CCS_651',\r\n#'CCS_199',\r\n#'MED_3610',\r\n#'CCS_99',\r\n#'MED_4920',\r\n#'MED_0199',\r\n#'MED_4650',\r\n#'Emphysema',\r\n#'MED_3940',\r\n#'MED_0230',\r\n#'MED_9940',\r\n#'MED_7813',\r\n#'U_MICALB24_1']\r\n\r\n#names_of_feats = []\r\n#for feat_group in feat_list:\r\n# for feat_dict in PATIENT_FEATURES_CONFIG:\r\n# if feat_dict['name'] == feat_group[0]:\r\n# names_of_feats.append(feat_dict['formatted_name'])\r\n# break\r\n# \r\n#names_of_feats[0] = 'Clinic Location'\r\n#names_of_feats[1] = 'Clinic Urban/Rural'\r\n#names_of_feats[2] = 'Ethnicity'\r\n#names_of_feats[3] = 'Insurance Type'\r\n#%%\r\nresult_dir = '../Results/allvars_pce_pts_0506/'\r\nimport os\r\nif not os.path.isdir(os.path.dirname(result_dir)): os.mkdir(os.path.dirname(result_dir))\r\n#result_dir = '../Results/allvars_pce_pts_0925/'\r\n#best_model = 'gbm'\r\n#from joblib import dump, load\r\n#result_dir = '../Results/allvars_oldyoung_missing_0913/'\r\n#best_model = 'gbm'\r\n#model = load(result_dir + best_model + '_best_model.joblib')\r\nrun_date_str = '0507'\r\n\r\n#feat_import_df = pd.read_csv(result_dir + best_model + \"_feature_importances.csv\")\r\n##%%\r\n#feat_names = [f for f in feat_import_df.feature if '_missing' not in f]\r\n#feat_list = [[f] for f in feat_names]\r\n##%%\r\n#ax, fig = variable_importance_plot(feat_import_df.feature, feat_import_df.importance.values, keep_top = 30)\r\n#ax.set_title('Feature importances for GBM: Impurity')\r\n#ax.set_xlabel('Mean Decrease in Impurity');\r\n#plt.tight_layout()\r\n#plt.savefig(f'{result_dir}feature_importances_{best_model}_impurity_{run_date_str}.png', dpi = 500)\r\n\r\n\r\n#%%\r\n\r\nfrom sklearn.experimental import enable_iterative_imputer\r\nfrom sklearn.impute import IterativeImputer\r\nfrom medical_ML import split_cohort\r\nfrom datetime import datetime\r\ntest_ind_col = 'test_ind'\r\nlabel = 'ascvdany5y'\r\nto_exclude = {\r\n 'pce_cohort': False,\r\n 'pce_invalid_vars': True,\r\n 'cvd_bl': True,\r\n 'antilpd': True,\r\n 'oldyoung': True}\r\ndatafile = 'allvars.csv'\r\nascvd_est = pd.read_csv('../Data/cohort/' + 
datafile)\r\n#%%\r\ntrain_est2, test_est2 = split_cohort(ascvd_est, to_exclude, test_ind_col, drop = 'all')\r\ntest_set_data = pd.get_dummies(test_est2, columns = [c for c in test_est2.columns if test_est2[c].dtype=='O'])\r\ntrain_set_data = pd.get_dummies(train_est2, columns = [c for c in train_est2.columns if train_est2[c].dtype=='O'])\r\ntrain_set_features = train_set_data[[f for f in train_set_data.columns if f != label]]\r\ntest_set_features = test_set_data[[f for f in test_set_data.columns if f != label]]\r\ntrain_set_labels = train_est2[label]\r\ntest_set_labels = test_est2[label]\r\ntrain_est2 = test_est2 = ascvd_est = None\r\nimp = IterativeImputer(add_indicator=False,\r\n estimator=None,\r\n imputation_order='ascending',\r\n initial_strategy='mean',\r\n max_iter=50, max_value=None,\r\n min_value=None,\r\n missing_values=np.nan,\r\n n_nearest_features=10,\r\n random_state=None,\r\n sample_posterior=False,\r\n tol=0.001, verbose=0)\r\nimp.fit(train_set_features)\r\ntrain_set_imp_features = imp.transform(train_set_features)\r\ntrain_set_imp_features = pd.DataFrame(train_set_imp_features, columns = train_set_features.columns)\r\ntest_set_imp_features = imp.transform(test_set_features)\r\ntest_set_imp_features = pd.DataFrame(test_set_imp_features, columns = test_set_features.columns)\r\ntrain_set_features = test_set_features = None\r\n#%%\r\n#fl2 = [[fl[0]] for fl in feat_list if 'race' not in fl[0]]\r\n#\r\n#fl2.append(['race'])\r\n#%%\r\n#gbm = model.named_steps['predictor']\r\n#gbm.n_features_ = test_set_features.shape[1]\r\n#parms = gbm.get_params()\r\n#model.named_steps['predictor'].n_features = test_set_features.shape[1]\r\nparms = {'n_estimators': 300,\r\n 'learning_rate': 0.01,\r\n 'max_depth': 5,\r\n 'subsample': 0.35,\r\n 'max_features': 0.25}\r\nprint('training GBM')\r\nnow = datetime.now()\r\ngbm2 = GradientBoostingClassifier(**parms)\r\nprint(train_set_imp_features.columns)\r\ngbm2.fit(train_set_imp_features, train_set_labels)\r\ndifference = (datetime.now() - now).total_seconds()\r\nprint('done, total seconds:', difference)\r\n#%%\r\nax, fig = variable_importance_plot(train_set_imp_features.columns, gbm2.feature_importances_, keep_top = 30)\r\n\r\nax.set_title('Feature importances for GBM model: Permutation Importance')\r\nax.set_xlabel('Mean Decrease in AUC')\r\nplt.tight_layout()\r\nplt.savefig(f'{result_dir}feat_imps_gini_{run_date_str}_100.png', dpi = 500)\r\n#dump(gbm2)\r\n#%%\r\nprint('calculating permutation importance')\r\nnow = datetime.now()\r\nfeat_names = [f for f in test_set_imp_features.columns if '_missing' not in f]\r\nfeat_list = [[f] for f in feat_names]\r\nperm = PermutationImportance(gbm2, n_iter=5).fit(test_set_imp_features, test_set_labels)\r\ndifference = (datetime.now() - now).total_seconds()\r\nprint('done, total seconds:', difference)\r\nwith open(f'{result_dir}permutation_feat_importances_all_test_{run_date_str}_5.pkl', \"wb\") as output_file:\r\n pickle.dump([perm.results_, perm.feature_importances_, perm.feature_importances_std_], output_file)\r\n #%%\r\nax, fig = variable_importance_plot(feat_names, perm.feature_importances_, err=perm.feature_importances_std_, keep_top = 30)\r\n\r\nax.set_title('Feature importances for GBM model: Permutation Importance')\r\nax.set_xlabel('Mean Decrease in AUC')\r\nplt.tight_layout()\r\nplt.savefig(f'{result_dir}feat_imps_permutation_test_{run_date_str}_100.png', dpi = 500)\r\n\r\n\r\n## Create horizontal bars\r\n#y_pos = np.arange(len(top_features_union))\r\n#\r\n#fig, ax = 
plt.subplots(figsize=(10,8))\r\n#ax.xaxis.grid(True, zorder=0)\r\n#width = 0.40\r\n#\r\n#offset_fix = np.zeros(len(top_features_union))\r\n#offset_fix[top_var_imp_red == 0]= -width/2\r\n##top_var_imp/np.max(top_var_imp) * 100 top_var_imp_red/np.max(top_var_imp_red) * 100 , width\r\n#\r\n#plt.barh(y_pos+width/2 + offset_fix, var_imp_df_top['relative importance'] , width, alpha = 0.5, edgecolor = \"black\", zorder=3, color='tab:grey')\r\n#plt.barh(y_pos-width/2, var_imp_df_red_top['relative importance'] ,width, alpha = 0.5, edgecolor = \"black\", zorder=3, color='tab:blue')\r\n# \r\n## Create names on the y-axis\r\n#plt.yticks(y_pos, top_features)\r\n#\r\n#plt.xlabel('Relative Importance (%)')\r\n#plt.xlim(0, 100)\r\n#plt.legend([ 'All variables','Bedside variables'])\r\n#plt.tight_layout()"
] | [
[
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"sklearn.impute.IterativeImputer",
"sklearn.utils.metaestimators.if_delegate_has_method",
"matplotlib.pyplot.subplots",
"sklearn.metrics.scorer.check_scoring",
"numpy.mean",
"numpy.std",
"pandas.get_dummies",
"sklearn.utils.check_random_state",
"sklearn.base.clone",
"matplotlib.pyplot.tight_layout",
"sklearn.utils.check_array",
"pandas.read_csv",
"sklearn.base.is_classifier",
"sklearn.ensemble.GradientBoostingClassifier"
]
] |
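The permutation-importance helpers in the ASCVD_ML row boil down to: compute a base score, shuffle one feature column at a time, rescore, and record the drop. The condensed sketch below re-implements just that loop from `iter_shuffled`/`get_score_importances`; the `permutation_importance` function name and the toy accuracy scorer are stand-ins for illustration, not the `PermutationImportance` class above.

```python
import numpy as np

def permutation_importance(score_func, X, y, n_iter=5, random_state=0):
    # Mean decrease in score when each column of X is shuffled, mirroring the
    # shuffle-score-restore loop in iter_shuffled / get_score_importances.
    rng = np.random.RandomState(random_state)
    base_score = score_func(X, y)
    decreases = np.zeros((n_iter, X.shape[1]))
    for it in range(n_iter):
        X_work = X.copy()
        for col in range(X.shape[1]):
            saved = X_work[:, col].copy()
            rng.shuffle(X_work[:, col])              # shuffle one column in place
            decreases[it, col] = base_score - score_func(X_work, y)
            X_work[:, col] = saved                   # restore before the next column
    return base_score, decreases.mean(axis=0)

# Toy usage: a scorer that only uses the first feature, so column 0 should dominate.
X = np.random.RandomState(0).randn(200, 3)
y = (X[:, 0] > 0).astype(int)
score = lambda X_, y_: np.mean((X_[:, 0] > 0).astype(int) == y_)
base, importances = permutation_importance(score, X, y)
print(base, importances)
```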
razamu15/scikit-learn | [
"6e2ad76ccda5d86444f74a34d98bfbfe6f345b1c"
] | [
"sklearn/impute/tests/test_impute.py"
] | [
"import pytest\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.stats import kstest\n\nimport io\n\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_allclose_dense_sparse\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\n\n# make IterativeImputer available\nfrom sklearn.experimental import enable_iterative_imputer # noqa\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.datasets import load_diabetes, fetch_california_housing\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.impute import MissingIndicator\nfrom sklearn.impute import SimpleImputer, IterativeImputer\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import make_union\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import tree\nfrom sklearn.random_projection import _sparse_random_matrix\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.impute._base import _most_frequent\n\n\ndef _assert_array_equal_and_same_dtype(x, y):\n assert_array_equal(x, y)\n assert x.dtype == y.dtype\n\n\ndef _assert_allclose_and_same_dtype(x, y):\n assert_allclose(x, y)\n assert x.dtype == y.dtype\n\n\ndef _check_statistics(X, X_true, strategy, statistics, missing_values):\n \"\"\"Utility function for testing imputation for a given strategy.\n\n Test with dense and sparse arrays\n\n Check that:\n - the statistics (mean, median, mode) are correct\n - the missing values are imputed correctly\"\"\"\n\n err_msg = \"Parameters: strategy = %s, missing_values = %s, sparse = {0}\" % (\n strategy,\n missing_values,\n )\n\n assert_ae = assert_array_equal\n\n if X.dtype.kind == \"f\" or X_true.dtype.kind == \"f\":\n assert_ae = assert_array_almost_equal\n\n # Normal matrix\n imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)\n X_trans = imputer.fit(X).transform(X.copy())\n assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))\n assert_ae(X_trans, X_true, err_msg=err_msg.format(False))\n\n # Sparse matrix\n imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)\n imputer.fit(sparse.csc_matrix(X))\n X_trans = imputer.transform(sparse.csc_matrix(X.copy()))\n\n if sparse.issparse(X_trans):\n X_trans = X_trans.toarray()\n\n assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))\n assert_ae(X_trans, X_true, err_msg=err_msg.format(True))\n\n\[email protected](\"strategy\", [\"mean\", \"median\", \"most_frequent\", \"constant\"])\ndef test_imputation_shape(strategy):\n # Verify the shapes of the imputed matrix for different strategies.\n X = np.random.randn(10, 2)\n X[::2] = np.nan\n\n imputer = SimpleImputer(strategy=strategy)\n X_imputed = imputer.fit_transform(sparse.csr_matrix(X))\n assert X_imputed.shape == (10, 2)\n X_imputed = imputer.fit_transform(X)\n assert X_imputed.shape == (10, 2)\n\n iterative_imputer = IterativeImputer(initial_strategy=strategy)\n X_imputed = iterative_imputer.fit_transform(X)\n assert X_imputed.shape == (10, 2)\n\n\[email protected](\"strategy\", [\"const\", 101, None])\ndef test_imputation_error_invalid_strategy(strategy):\n X = np.ones((3, 5))\n X[0, 0] = np.nan\n\n with pytest.raises(ValueError, match=str(strategy)):\n imputer = SimpleImputer(strategy=strategy)\n imputer.fit_transform(X)\n\n\[email protected](\"strategy\", [\"mean\", \"median\", 
\"most_frequent\"])\ndef test_imputation_deletion_warning(strategy):\n X = np.ones((3, 5))\n X[:, 0] = np.nan\n imputer = SimpleImputer(strategy=strategy, verbose=1)\n\n # TODO: Remove in 1.3\n with pytest.warns(FutureWarning, match=\"The 'verbose' parameter\"):\n imputer.fit(X)\n\n with pytest.warns(UserWarning, match=\"Skipping\"):\n imputer.transform(X)\n\n\[email protected](\"strategy\", [\"mean\", \"median\", \"most_frequent\"])\ndef test_imputation_deletion_warning_feature_names(strategy):\n\n pd = pytest.importorskip(\"pandas\")\n\n missing_values = np.nan\n feature_names = np.array([\"a\", \"b\", \"c\", \"d\"], dtype=object)\n X = pd.DataFrame(\n [\n [missing_values, missing_values, 1, missing_values],\n [4, missing_values, 2, 10],\n ],\n columns=feature_names,\n )\n\n imputer = SimpleImputer(strategy=strategy, verbose=1)\n\n # TODO: Remove in 1.3\n with pytest.warns(FutureWarning, match=\"The 'verbose' parameter\"):\n imputer.fit(X)\n\n # check SimpleImputer returning feature name attribute correctly\n assert_array_equal(imputer.feature_names_in_, feature_names)\n\n # ensure that skipped feature warning includes feature name\n with pytest.warns(\n UserWarning, match=r\"Skipping features without any observed values: \\['b'\\]\"\n ):\n imputer.transform(X)\n\n\[email protected](\"strategy\", [\"mean\", \"median\", \"most_frequent\", \"constant\"])\ndef test_imputation_error_sparse_0(strategy):\n # check that error are raised when missing_values = 0 and input is sparse\n X = np.ones((3, 5))\n X[0] = 0\n X = sparse.csc_matrix(X)\n\n imputer = SimpleImputer(strategy=strategy, missing_values=0)\n with pytest.raises(ValueError, match=\"Provide a dense array\"):\n imputer.fit(X)\n\n imputer.fit(X.toarray())\n with pytest.raises(ValueError, match=\"Provide a dense array\"):\n imputer.transform(X)\n\n\ndef safe_median(arr, *args, **kwargs):\n # np.median([]) raises a TypeError for numpy >= 1.10.1\n length = arr.size if hasattr(arr, \"size\") else len(arr)\n return np.nan if length == 0 else np.median(arr, *args, **kwargs)\n\n\ndef safe_mean(arr, *args, **kwargs):\n # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1\n length = arr.size if hasattr(arr, \"size\") else len(arr)\n return np.nan if length == 0 else np.mean(arr, *args, **kwargs)\n\n\ndef test_imputation_mean_median():\n # Test imputation using the mean and median strategies, when\n # missing_values != 0.\n rng = np.random.RandomState(0)\n\n dim = 10\n dec = 10\n shape = (dim * dim, dim + dec)\n\n zeros = np.zeros(shape[0])\n values = np.arange(1, shape[0] + 1)\n values[4::2] = -values[4::2]\n\n tests = [\n (\"mean\", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),\n (\"median\", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))),\n ]\n\n for strategy, test_missing_values, true_value_fun in tests:\n X = np.empty(shape)\n X_true = np.empty(shape)\n true_statistics = np.empty(shape[1])\n\n # Create a matrix X with columns\n # - with only zeros,\n # - with only missing values\n # - with zeros, missing values and values\n # And a matrix X_true containing all true values\n for j in range(shape[1]):\n nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)\n nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0)\n nb_values = shape[0] - nb_zeros - nb_missing_values\n\n z = zeros[:nb_zeros]\n p = np.repeat(test_missing_values, nb_missing_values)\n v = values[rng.permutation(len(values))[:nb_values]]\n\n true_statistics[j] = true_value_fun(z, v, p)\n\n # Create the columns\n X[:, j] = np.hstack((v, 
z, p))\n\n if 0 == test_missing_values:\n # XXX unreached code as of v0.22\n X_true[:, j] = np.hstack(\n (v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros))\n )\n else:\n X_true[:, j] = np.hstack(\n (v, z, np.repeat(true_statistics[j], nb_missing_values))\n )\n\n # Shuffle them the same way\n np.random.RandomState(j).shuffle(X[:, j])\n np.random.RandomState(j).shuffle(X_true[:, j])\n\n # Mean doesn't support columns containing NaNs, median does\n if strategy == \"median\":\n cols_to_keep = ~np.isnan(X_true).any(axis=0)\n else:\n cols_to_keep = ~np.isnan(X_true).all(axis=0)\n\n X_true = X_true[:, cols_to_keep]\n\n _check_statistics(X, X_true, strategy, true_statistics, test_missing_values)\n\n\ndef test_imputation_median_special_cases():\n # Test median imputation with sparse boundary cases\n X = np.array(\n [\n [0, np.nan, np.nan], # odd: implicit zero\n [5, np.nan, np.nan], # odd: explicit nonzero\n [0, 0, np.nan], # even: average two zeros\n [-5, 0, np.nan], # even: avg zero and neg\n [0, 5, np.nan], # even: avg zero and pos\n [4, 5, np.nan], # even: avg nonzeros\n [-4, -5, np.nan], # even: avg negatives\n [-1, 2, np.nan], # even: crossing neg and pos\n ]\n ).transpose()\n\n X_imputed_median = np.array(\n [\n [0, 0, 0],\n [5, 5, 5],\n [0, 0, 0],\n [-5, 0, -2.5],\n [0, 5, 2.5],\n [4, 5, 4.5],\n [-4, -5, -4.5],\n [-1, 2, 0.5],\n ]\n ).transpose()\n statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5]\n\n _check_statistics(X, X_imputed_median, \"median\", statistics_median, np.nan)\n\n\[email protected](\"strategy\", [\"mean\", \"median\"])\[email protected](\"dtype\", [None, object, str])\ndef test_imputation_mean_median_error_invalid_type(strategy, dtype):\n X = np.array([[\"a\", \"b\", 3], [4, \"e\", 6], [\"g\", \"h\", 9]], dtype=dtype)\n msg = \"non-numeric data:\\ncould not convert string to float: '\"\n with pytest.raises(ValueError, match=msg):\n imputer = SimpleImputer(strategy=strategy)\n imputer.fit_transform(X)\n\n\[email protected](\"strategy\", [\"mean\", \"median\"])\[email protected](\"type\", [\"list\", \"dataframe\"])\ndef test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):\n X = [[\"a\", \"b\", 3], [4, \"e\", 6], [\"g\", \"h\", 9]]\n if type == \"dataframe\":\n pd = pytest.importorskip(\"pandas\")\n X = pd.DataFrame(X)\n msg = \"non-numeric data:\\ncould not convert string to float: '\"\n with pytest.raises(ValueError, match=msg):\n imputer = SimpleImputer(strategy=strategy)\n imputer.fit_transform(X)\n\n\[email protected](\"strategy\", [\"constant\", \"most_frequent\"])\[email protected](\"dtype\", [str, np.dtype(\"U\"), np.dtype(\"S\")])\ndef test_imputation_const_mostf_error_invalid_types(strategy, dtype):\n # Test imputation on non-numeric data using \"most_frequent\" and \"constant\"\n # strategy\n X = np.array(\n [\n [np.nan, np.nan, \"a\", \"f\"],\n [np.nan, \"c\", np.nan, \"d\"],\n [np.nan, \"b\", \"d\", np.nan],\n [np.nan, \"c\", \"d\", \"h\"],\n ],\n dtype=dtype,\n )\n\n err_msg = \"SimpleImputer does not support data\"\n with pytest.raises(ValueError, match=err_msg):\n imputer = SimpleImputer(strategy=strategy)\n imputer.fit(X).transform(X)\n\n\ndef test_imputation_most_frequent():\n # Test imputation using the most-frequent strategy.\n X = np.array(\n [\n [-1, -1, 0, 5],\n [-1, 2, -1, 3],\n [-1, 1, 3, -1],\n [-1, 2, 3, 7],\n ]\n )\n\n X_true = np.array(\n [\n [2, 0, 5],\n [2, 3, 3],\n [1, 3, 3],\n [2, 3, 7],\n ]\n )\n\n # scipy.stats.mode, used in SimpleImputer, doesn't return the first most\n # frequent as promised in 
the doc but the lowest most frequent. When this\n # test will fail after an update of scipy, SimpleImputer will need to be\n # updated to be consistent with the new (correct) behaviour\n _check_statistics(X, X_true, \"most_frequent\", [np.nan, 2, 3, 3], -1)\n\n\[email protected](\"marker\", [None, np.nan, \"NAN\", \"\", 0])\ndef test_imputation_most_frequent_objects(marker):\n # Test imputation using the most-frequent strategy.\n X = np.array(\n [\n [marker, marker, \"a\", \"f\"],\n [marker, \"c\", marker, \"d\"],\n [marker, \"b\", \"d\", marker],\n [marker, \"c\", \"d\", \"h\"],\n ],\n dtype=object,\n )\n\n X_true = np.array(\n [\n [\"c\", \"a\", \"f\"],\n [\"c\", \"d\", \"d\"],\n [\"b\", \"d\", \"d\"],\n [\"c\", \"d\", \"h\"],\n ],\n dtype=object,\n )\n\n imputer = SimpleImputer(missing_values=marker, strategy=\"most_frequent\")\n X_trans = imputer.fit(X).transform(X)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\"dtype\", [object, \"category\"])\ndef test_imputation_most_frequent_pandas(dtype):\n # Test imputation using the most frequent strategy on pandas df\n pd = pytest.importorskip(\"pandas\")\n\n f = io.StringIO(\"Cat1,Cat2,Cat3,Cat4\\n,i,x,\\na,,y,\\na,j,,\\nb,j,x,\")\n\n df = pd.read_csv(f, dtype=dtype)\n\n X_true = np.array(\n [[\"a\", \"i\", \"x\"], [\"a\", \"j\", \"y\"], [\"a\", \"j\", \"x\"], [\"b\", \"j\", \"x\"]],\n dtype=object,\n )\n\n imputer = SimpleImputer(strategy=\"most_frequent\")\n X_trans = imputer.fit_transform(df)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\"X_data, missing_value\", [(1, 0), (1.0, np.nan)])\ndef test_imputation_constant_error_invalid_type(X_data, missing_value):\n # Verify that exceptions are raised on invalid fill_value type\n X = np.full((3, 5), X_data, dtype=float)\n X[0, 0] = missing_value\n\n with pytest.raises(ValueError, match=\"imputing numerical\"):\n imputer = SimpleImputer(\n missing_values=missing_value, strategy=\"constant\", fill_value=\"x\"\n )\n imputer.fit_transform(X)\n\n\ndef test_imputation_constant_integer():\n # Test imputation using the constant strategy on integers\n X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])\n\n X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]])\n\n imputer = SimpleImputer(missing_values=-1, strategy=\"constant\", fill_value=0)\n X_trans = imputer.fit_transform(X)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\"array_constructor\", [sparse.csr_matrix, np.asarray])\ndef test_imputation_constant_float(array_constructor):\n # Test imputation using the constant strategy on floats\n X = np.array(\n [\n [np.nan, 1.1, 0, np.nan],\n [1.2, np.nan, 1.3, np.nan],\n [0, 0, np.nan, np.nan],\n [1.4, 1.5, 0, np.nan],\n ]\n )\n\n X_true = np.array(\n [[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]]\n )\n\n X = array_constructor(X)\n\n X_true = array_constructor(X_true)\n\n imputer = SimpleImputer(strategy=\"constant\", fill_value=-1)\n X_trans = imputer.fit_transform(X)\n\n assert_allclose_dense_sparse(X_trans, X_true)\n\n\[email protected](\"marker\", [None, np.nan, \"NAN\", \"\", 0])\ndef test_imputation_constant_object(marker):\n # Test imputation using the constant strategy on objects\n X = np.array(\n [\n [marker, \"a\", \"b\", marker],\n [\"c\", marker, \"d\", marker],\n [\"e\", \"f\", marker, marker],\n [\"g\", \"h\", \"i\", marker],\n ],\n dtype=object,\n )\n\n X_true = np.array(\n [\n [\"missing\", \"a\", \"b\", \"missing\"],\n [\"c\", \"missing\", \"d\", \"missing\"],\n 
[\"e\", \"f\", \"missing\", \"missing\"],\n [\"g\", \"h\", \"i\", \"missing\"],\n ],\n dtype=object,\n )\n\n imputer = SimpleImputer(\n missing_values=marker, strategy=\"constant\", fill_value=\"missing\"\n )\n X_trans = imputer.fit_transform(X)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\"dtype\", [object, \"category\"])\ndef test_imputation_constant_pandas(dtype):\n # Test imputation using the constant strategy on pandas df\n pd = pytest.importorskip(\"pandas\")\n\n f = io.StringIO(\"Cat1,Cat2,Cat3,Cat4\\n,i,x,\\na,,y,\\na,j,,\\nb,j,x,\")\n\n df = pd.read_csv(f, dtype=dtype)\n\n X_true = np.array(\n [\n [\"missing_value\", \"i\", \"x\", \"missing_value\"],\n [\"a\", \"missing_value\", \"y\", \"missing_value\"],\n [\"a\", \"j\", \"missing_value\", \"missing_value\"],\n [\"b\", \"j\", \"x\", \"missing_value\"],\n ],\n dtype=object,\n )\n\n imputer = SimpleImputer(strategy=\"constant\")\n X_trans = imputer.fit_transform(df)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\"X\", [[[1], [2]], [[1], [np.nan]]])\ndef test_iterative_imputer_one_feature(X):\n # check we exit early when there is a single feature\n imputer = IterativeImputer().fit(X)\n assert imputer.n_iter_ == 0\n imputer = IterativeImputer()\n imputer.fit([[1], [2]])\n assert imputer.n_iter_ == 0\n imputer.fit([[1], [np.nan]])\n assert imputer.n_iter_ == 0\n\n\ndef test_imputation_pipeline_grid_search():\n # Test imputation within a pipeline + gridsearch.\n X = _sparse_random_matrix(100, 100, density=0.10)\n missing_values = X.data[0]\n\n pipeline = Pipeline(\n [\n (\"imputer\", SimpleImputer(missing_values=missing_values)),\n (\"tree\", tree.DecisionTreeRegressor(random_state=0)),\n ]\n )\n\n parameters = {\"imputer__strategy\": [\"mean\", \"median\", \"most_frequent\"]}\n\n Y = _sparse_random_matrix(100, 1, density=0.10).toarray()\n gs = GridSearchCV(pipeline, parameters)\n gs.fit(X, Y)\n\n\ndef test_imputation_copy():\n # Test imputation with copy\n X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)\n\n # copy=True, dense => copy\n X = X_orig.copy().toarray()\n imputer = SimpleImputer(missing_values=0, strategy=\"mean\", copy=True)\n Xt = imputer.fit(X).transform(X)\n Xt[0, 0] = -1\n assert not np.all(X == Xt)\n\n # copy=True, sparse csr => copy\n X = X_orig.copy()\n imputer = SimpleImputer(missing_values=X.data[0], strategy=\"mean\", copy=True)\n Xt = imputer.fit(X).transform(X)\n Xt.data[0] = -1\n assert not np.all(X.data == Xt.data)\n\n # copy=False, dense => no copy\n X = X_orig.copy().toarray()\n imputer = SimpleImputer(missing_values=0, strategy=\"mean\", copy=False)\n Xt = imputer.fit(X).transform(X)\n Xt[0, 0] = -1\n assert_array_almost_equal(X, Xt)\n\n # copy=False, sparse csc => no copy\n X = X_orig.copy().tocsc()\n imputer = SimpleImputer(missing_values=X.data[0], strategy=\"mean\", copy=False)\n Xt = imputer.fit(X).transform(X)\n Xt.data[0] = -1\n assert_array_almost_equal(X.data, Xt.data)\n\n # copy=False, sparse csr => copy\n X = X_orig.copy()\n imputer = SimpleImputer(missing_values=X.data[0], strategy=\"mean\", copy=False)\n Xt = imputer.fit(X).transform(X)\n Xt.data[0] = -1\n assert not np.all(X.data == Xt.data)\n\n # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is\n # made, even if copy=False.\n\n\ndef test_iterative_imputer_zero_iters():\n rng = np.random.RandomState(0)\n\n n = 100\n d = 10\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n missing_flag = X == 0\n X[missing_flag] = np.nan\n\n imputer = 
IterativeImputer(max_iter=0)\n X_imputed = imputer.fit_transform(X)\n # with max_iter=0, only initial imputation is performed\n assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))\n\n # repeat but force n_iter_ to 0\n imputer = IterativeImputer(max_iter=5).fit(X)\n # transformed should not be equal to initial imputation\n assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X))\n\n imputer.n_iter_ = 0\n # now they should be equal as only initial imputation is done\n assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X))\n\n\ndef test_iterative_imputer_verbose():\n rng = np.random.RandomState(0)\n\n n = 100\n d = 3\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)\n imputer.fit(X)\n imputer.transform(X)\n imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)\n imputer.fit(X)\n imputer.transform(X)\n\n\ndef test_iterative_imputer_all_missing():\n n = 100\n d = 3\n X = np.zeros((n, d))\n imputer = IterativeImputer(missing_values=0, max_iter=1)\n X_imputed = imputer.fit_transform(X)\n assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))\n\n\[email protected](\n \"imputation_order\", [\"random\", \"roman\", \"ascending\", \"descending\", \"arabic\"]\n)\ndef test_iterative_imputer_imputation_order(imputation_order):\n rng = np.random.RandomState(0)\n n = 100\n d = 10\n max_iter = 2\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n X[:, 0] = 1 # this column should not be discarded by IterativeImputer\n\n imputer = IterativeImputer(\n missing_values=0,\n max_iter=max_iter,\n n_nearest_features=5,\n sample_posterior=False,\n skip_complete=True,\n min_value=0,\n max_value=1,\n verbose=1,\n imputation_order=imputation_order,\n random_state=rng,\n )\n imputer.fit_transform(X)\n ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]\n\n assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_\n\n if imputation_order == \"roman\":\n assert np.all(ordered_idx[: d - 1] == np.arange(1, d))\n elif imputation_order == \"arabic\":\n assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1))\n elif imputation_order == \"random\":\n ordered_idx_round_1 = ordered_idx[: d - 1]\n ordered_idx_round_2 = ordered_idx[d - 1 :]\n assert ordered_idx_round_1 != ordered_idx_round_2\n elif \"ending\" in imputation_order:\n assert len(ordered_idx) == max_iter * (d - 1)\n\n\[email protected](\n \"estimator\", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV(),PLSRegression()]\n)\ndef test_iterative_imputer_estimators(estimator):\n rng = np.random.RandomState(0)\n\n n = 100\n d = 10\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n\n imputer = IterativeImputer(\n missing_values=0, max_iter=1, estimator=estimator, random_state=rng\n )\n imputer.fit_transform(X)\n\n # check that types are correct for estimators\n hashes = []\n for triplet in imputer.imputation_sequence_:\n expected_type = (\n type(estimator) if estimator is not None else type(BayesianRidge())\n )\n assert isinstance(triplet.estimator, expected_type)\n hashes.append(id(triplet.estimator))\n\n # check that each estimator is unique\n assert len(set(hashes)) == len(hashes)\n\n\ndef test_multiple_dimension_PLS_dataset1():\n\n # Import random field generator\n rng = np.random.RandomState(42)\n\n # Fetch the values from the california housing dataset\n X_california, y_california = 
fetch_california_housing(return_X_y=True)\n\n # Collect a subset for the values for the test\n X_california = X_california[:400]\n y_california = y_california[:400]\n\n # Used to remove values 75% of values from dataset array\n def add_missing_values(X_full):\n n_samples, n_features = X_full.shape\n\n # Determine the number of Nans to be set\n missing_rate = 0.75\n n_missing_samples = int(n_samples * missing_rate)\n \n # Determine ranom indices to be changed to Nans in the dataset array\n missing_samples = np.zeros(n_samples, dtype=bool)\n missing_samples[: n_missing_samples] = True\n rng.shuffle(missing_samples)\n\n # Change the values from the dataset array to Nans\n missing_features = rng.randint(0, n_features, n_missing_samples)\n X_missing = X_full.copy()\n X_missing[missing_samples, missing_features] = np.nan\n\n return X_missing\n\n # Generate 2D array with missing data\n X_miss_california = add_missing_values(X_california)\n\n # Create imputer with estimator as PLSRegression and impute dataset\n imputer = IterativeImputer(estimator=PLSRegression(n_components=2))\n X_imputed = imputer.fit_transform(X_miss_california)\n\n # Verify the dimension of the inputed dataset\n assert X_california.shape == X_imputed.shape\n\n # Verify non-missing values were unchanged in the dataset\n non_missing = ~np.isnan(X_miss_california.flatten())\n assert np.all(X_miss_california.flatten()[non_missing] ==\n X_imputed.flatten()[non_missing])\n\n\ndef test_multiple_dimension_PLS2_dataset2():\n\n #Import random field generator\n rng = np.random.RandomState(42)\n\n #Fetch the values from the california housing dataset\n X_diabetes, Y_diabetes = load_diabetes(return_X_y=True)\n\n #Collect a subset for the values for the test\n X_diabetes = X_diabetes[:200]\n Y_diabetes = Y_diabetes[:200]\n\n #Used to remove values 80% of values from dataset array\n def add_missing_values(X_full, y_full):\n n_samples, n_features = X_full.shape\n\n # Determine the number of Nans to be set \n missing_rate = 0.80\n n_missing_samples = int(n_samples * missing_rate)\n\n # Determine ranom indices to be changed to Nans in the dataset array\n missing_samples = np.zeros(n_samples, dtype=bool)\n missing_samples[: n_missing_samples] = True\n rng.shuffle(missing_samples)\n\n # Change the values from the dataset array to Nans\n missing_features = rng.randint(0, n_features, n_missing_samples)\n X_missing = X_full.copy()\n X_missing[missing_samples, missing_features] = np.nan\n y_missing = y_full.copy()\n\n return X_missing, y_missing\n\n # Generate 2D array with missing data\n X_miss_diabetes, Y_miss_diabetes = add_missing_values(\n X_diabetes, Y_diabetes)\n\n # Create imputer with estimator as PLSRegression\n imputer = IterativeImputer(estimator=PLSRegression(n_components=2))\n\n X_imputed = imputer.fit_transform(X_miss_diabetes)\n\n # Verify the dimensions of the imputed dataset\n assert X_diabetes.shape == X_imputed.shape \n\n # Verify that non-missing values stayed the same\n non_missing = ~np.isnan(X_miss_diabetes.flatten())\n assert np.all(X_miss_diabetes.flatten()[non_missing] ==\n X_imputed.flatten()[non_missing])\n\n\ndef test_iterative_imputer_clip():\n rng = np.random.RandomState(0)\n n = 100\n d = 10\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n\n imputer = IterativeImputer(\n missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng\n )\n\n Xt = imputer.fit_transform(X)\n assert_allclose(np.min(Xt[X == 0]), 0.1)\n assert_allclose(np.max(Xt[X == 0]), 0.2)\n 
assert_allclose(Xt[X != 0], X[X != 0])\n\n\ndef test_iterative_imputer_clip_truncnorm():\n rng = np.random.RandomState(0)\n n = 100\n d = 10\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()\n X[:, 0] = 1\n\n imputer = IterativeImputer(\n missing_values=0,\n max_iter=2,\n n_nearest_features=5,\n sample_posterior=True,\n min_value=0.1,\n max_value=0.2,\n verbose=1,\n imputation_order=\"random\",\n random_state=rng,\n )\n Xt = imputer.fit_transform(X)\n assert_allclose(np.min(Xt[X == 0]), 0.1)\n assert_allclose(np.max(Xt[X == 0]), 0.2)\n assert_allclose(Xt[X != 0], X[X != 0])\n\n\ndef test_iterative_imputer_truncated_normal_posterior():\n # test that the values that are imputed using `sample_posterior=True`\n # with boundaries (`min_value` and `max_value` are not None) are drawn\n # from a distribution that looks gaussian via the Kolmogorov Smirnov test.\n # note that starting from the wrong random seed will make this test fail\n # because random sampling doesn't occur at all when the imputation\n # is outside of the (min_value, max_value) range\n rng = np.random.RandomState(42)\n\n X = rng.normal(size=(5, 5))\n X[0][0] = np.nan\n\n imputer = IterativeImputer(\n min_value=0, max_value=0.5, sample_posterior=True, random_state=rng\n )\n\n imputer.fit_transform(X)\n # generate multiple imputations for the single missing value\n imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])\n\n assert all(imputations >= 0)\n assert all(imputations <= 0.5)\n\n mu, sigma = imputations.mean(), imputations.std()\n ks_statistic, p_value = kstest((imputations - mu) / sigma, \"norm\")\n if sigma == 0:\n sigma += 1e-12\n ks_statistic, p_value = kstest((imputations - mu) / sigma, \"norm\")\n # we want to fail to reject null hypothesis\n # null hypothesis: distributions are the same\n assert ks_statistic < 0.2 or p_value > 0.1, \"The posterior does appear to be normal\"\n\n\[email protected](\"strategy\", [\"mean\", \"median\", \"most_frequent\"])\ndef test_iterative_imputer_missing_at_transform(strategy):\n rng = np.random.RandomState(0)\n n = 100\n d = 10\n X_train = rng.randint(low=0, high=3, size=(n, d))\n X_test = rng.randint(low=0, high=3, size=(n, d))\n\n X_train[:, 0] = 1 # definitely no missing values in 0th column\n X_test[0, 0] = 0 # definitely missing value in 0th column\n\n imputer = IterativeImputer(\n missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng\n ).fit(X_train)\n initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train)\n\n # if there were no missing values at time of fit, then imputer will\n # only use the initial imputer for that feature at transform\n assert_allclose(\n imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]\n )\n\n\ndef test_iterative_imputer_transform_stochasticity():\n rng1 = np.random.RandomState(0)\n rng2 = np.random.RandomState(1)\n n = 100\n d = 10\n X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()\n\n # when sample_posterior=True, two transforms shouldn't be equal\n imputer = IterativeImputer(\n missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1\n )\n imputer.fit(X)\n\n X_fitted_1 = imputer.transform(X)\n X_fitted_2 = imputer.transform(X)\n\n # sufficient to assert that the means are not the same\n assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))\n\n # when sample_posterior=False, and n_nearest_features=None\n # and imputation_order is not random\n # the two transforms should be identical even if 
rng are different\n imputer1 = IterativeImputer(\n missing_values=0,\n max_iter=1,\n sample_posterior=False,\n n_nearest_features=None,\n imputation_order=\"ascending\",\n random_state=rng1,\n )\n\n imputer2 = IterativeImputer(\n missing_values=0,\n max_iter=1,\n sample_posterior=False,\n n_nearest_features=None,\n imputation_order=\"ascending\",\n random_state=rng2,\n )\n imputer1.fit(X)\n imputer2.fit(X)\n\n X_fitted_1a = imputer1.transform(X)\n X_fitted_1b = imputer1.transform(X)\n X_fitted_2 = imputer2.transform(X)\n\n assert_allclose(X_fitted_1a, X_fitted_1b)\n assert_allclose(X_fitted_1a, X_fitted_2)\n\n\ndef test_iterative_imputer_no_missing():\n rng = np.random.RandomState(0)\n X = rng.rand(100, 100)\n X[:, 0] = np.nan\n m1 = IterativeImputer(max_iter=10, random_state=rng)\n m2 = IterativeImputer(max_iter=10, random_state=rng)\n pred1 = m1.fit(X).transform(X)\n pred2 = m2.fit_transform(X)\n # should exclude the first column entirely\n assert_allclose(X[:, 1:], pred1)\n # fit and fit_transform should both be identical\n assert_allclose(pred1, pred2)\n\n\ndef test_iterative_imputer_rank_one():\n rng = np.random.RandomState(0)\n d = 50\n A = rng.rand(d, 1)\n B = rng.rand(1, d)\n X = np.dot(A, B)\n nan_mask = rng.rand(d, d) < 0.5\n X_missing = X.copy()\n X_missing[nan_mask] = np.nan\n\n imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng)\n X_filled = imputer.fit_transform(X_missing)\n assert_allclose(X_filled, X, atol=0.02)\n\n\[email protected](\"rank\", [3, 5])\ndef test_iterative_imputer_transform_recovery(rank):\n rng = np.random.RandomState(0)\n n = 70\n d = 70\n A = rng.rand(n, rank)\n B = rng.rand(rank, d)\n X_filled = np.dot(A, B)\n nan_mask = rng.rand(n, d) < 0.5\n X_missing = X_filled.copy()\n X_missing[nan_mask] = np.nan\n\n # split up data in half\n n = n // 2\n X_train = X_missing[:n]\n X_test_filled = X_filled[n:]\n X_test = X_missing[n:]\n\n imputer = IterativeImputer(\n max_iter=5, imputation_order=\"descending\", verbose=1, random_state=rng\n ).fit(X_train)\n X_test_est = imputer.transform(X_test)\n assert_allclose(X_test_filled, X_test_est, atol=0.1)\n\n\ndef test_iterative_imputer_additive_matrix():\n rng = np.random.RandomState(0)\n n = 100\n d = 10\n A = rng.randn(n, d)\n B = rng.randn(n, d)\n X_filled = np.zeros(A.shape)\n for i in range(d):\n for j in range(d):\n X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2\n # a quarter is randomly missing\n nan_mask = rng.rand(n, d) < 0.25\n X_missing = X_filled.copy()\n X_missing[nan_mask] = np.nan\n\n # split up data\n n = n // 2\n X_train = X_missing[:n]\n X_test_filled = X_filled[n:]\n X_test = X_missing[n:]\n\n imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train)\n X_test_est = imputer.transform(X_test)\n assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)\n\n\[email protected](\n \"max_iter, tol, error_type, warning\",\n [\n (-1, 1e-3, ValueError, \"should be a positive integer\"),\n (1, -1e-3, ValueError, \"should be a non-negative float\"),\n ],\n)\ndef test_iterative_imputer_error_param(max_iter, tol, error_type, warning):\n X = np.zeros((100, 2))\n imputer = IterativeImputer(max_iter=max_iter, tol=tol)\n with pytest.raises(error_type, match=warning):\n imputer.fit_transform(X)\n\n\ndef test_iterative_imputer_early_stopping():\n rng = np.random.RandomState(0)\n n = 50\n d = 5\n A = rng.rand(n, 1)\n B = rng.rand(1, d)\n X = np.dot(A, B)\n nan_mask = rng.rand(n, d) < 0.5\n X_missing = X.copy()\n X_missing[nan_mask] = np.nan\n\n imputer = IterativeImputer(\n 
max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng\n )\n X_filled_100 = imputer.fit_transform(X_missing)\n assert len(imputer.imputation_sequence_) == d * imputer.n_iter_\n\n imputer = IterativeImputer(\n max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng\n )\n X_filled_early = imputer.fit_transform(X_missing)\n assert_allclose(X_filled_100, X_filled_early, atol=1e-7)\n\n imputer = IterativeImputer(\n max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng\n )\n imputer.fit(X_missing)\n assert imputer.n_iter_ == imputer.max_iter\n\n\ndef test_iterative_imputer_catch_warning():\n # check that we catch a RuntimeWarning due to a division by zero when a\n # feature is constant in the dataset\n X, y = load_diabetes(return_X_y=True)\n n_samples, n_features = X.shape\n\n # simulate that a feature only contain one category during fit\n X[:, 3] = 1\n\n # add some missing values\n rng = np.random.RandomState(0)\n missing_rate = 0.15\n for feat in range(n_features):\n sample_idx = rng.choice(\n np.arange(n_samples), size=int(n_samples * missing_rate), replace=False\n )\n X[sample_idx, feat] = np.nan\n\n imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)\n with pytest.warns(None) as record:\n X_fill = imputer.fit_transform(X, y)\n assert not [w.message for w in record]\n assert not np.any(np.isnan(X_fill))\n\n\[email protected](\n \"min_value, max_value, correct_output\",\n [\n (0, 100, np.array([[0] * 3, [100] * 3])),\n (None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),\n (-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),\n ([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),\n (\n [-5, -np.inf, 10],\n [100, 200, np.inf],\n np.array([[-5, -np.inf, 10], [100, 200, np.inf]]),\n ),\n ],\n ids=[\"scalars\", \"None-default\", \"inf\", \"lists\", \"lists-with-inf\"],\n)\ndef test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output):\n # check that passing scalar or array-like\n # for min_value and max_value in IterativeImputer works\n X = np.random.RandomState(0).randn(10, 3)\n imputer = IterativeImputer(min_value=min_value, max_value=max_value)\n imputer.fit(X)\n\n assert isinstance(imputer._min_value, np.ndarray) and isinstance(\n imputer._max_value, np.ndarray\n )\n assert (imputer._min_value.shape[0] == X.shape[1]) and (\n imputer._max_value.shape[0] == X.shape[1]\n )\n\n assert_allclose(correct_output[0, :], imputer._min_value)\n assert_allclose(correct_output[1, :], imputer._max_value)\n\n\[email protected](\n \"min_value, max_value, err_msg\",\n [\n (100, 0, \"min_value >= max_value.\"),\n (np.inf, -np.inf, \"min_value >= max_value.\"),\n ([-5, 5], [100, 200, 0], \"_value' should be of shape\"),\n ],\n)\ndef test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):\n # check that passing scalar or array-like\n # for min_value and max_value in IterativeImputer works\n X = np.random.random((10, 3))\n imputer = IterativeImputer(min_value=min_value, max_value=max_value)\n with pytest.raises(ValueError, match=err_msg):\n imputer.fit(X)\n\n\[email protected](\n \"min_max_1, min_max_2\",\n [([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])],\n ids=[\"None-vs-inf\", \"Scalar-vs-vector\"],\n)\ndef test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):\n # Test that None/inf and scalar/vector give the same imputation\n X_train = np.array(\n [\n [np.nan, 2, 2, 1],\n [10, np.nan, np.nan, 7],\n [3, 1, np.nan, 
1],\n [np.nan, 4, 2, np.nan],\n ]\n )\n X_test = np.array(\n [[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]]\n )\n imputer1 = IterativeImputer(\n min_value=min_max_1[0], max_value=min_max_1[1], random_state=0\n )\n imputer2 = IterativeImputer(\n min_value=min_max_2[0], max_value=min_max_2[1], random_state=0\n )\n X_test_imputed1 = imputer1.fit(X_train).transform(X_test)\n X_test_imputed2 = imputer2.fit(X_train).transform(X_test)\n assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])\n\n\[email protected](\"skip_complete\", [True, False])\ndef test_iterative_imputer_skip_non_missing(skip_complete):\n # check the imputing strategy when missing data are present in the\n # testing set only.\n # taken from: https://github.com/scikit-learn/scikit-learn/issues/14383\n rng = np.random.RandomState(0)\n X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]])\n X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]])\n imputer = IterativeImputer(\n initial_strategy=\"mean\", skip_complete=skip_complete, random_state=rng\n )\n X_test_est = imputer.fit(X_train).transform(X_test)\n if skip_complete:\n # impute with the initial strategy: 'mean'\n assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))\n else:\n assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)\n\n\[email protected](\"rs_imputer\", [None, 1, np.random.RandomState(seed=1)])\[email protected](\"rs_estimator\", [None, 1, np.random.RandomState(seed=1)])\ndef test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):\n class ZeroEstimator:\n def __init__(self, random_state):\n self.random_state = random_state\n\n def fit(self, *args, **kgards):\n return self\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n estimator = ZeroEstimator(random_state=rs_estimator)\n imputer = IterativeImputer(random_state=rs_imputer)\n X_train = np.zeros((10, 3))\n imputer.fit(X_train)\n assert estimator.random_state == rs_estimator\n\n\[email protected](\n \"X_fit, X_trans, params, msg_err\",\n [\n (\n np.array([[-1, 1], [1, 2]]),\n np.array([[-1, 1], [1, -1]]),\n {\"features\": \"missing-only\", \"sparse\": \"auto\"},\n \"have missing values in transform but have no missing values in fit\",\n ),\n (\n np.array([[-1, 1], [1, 2]]),\n np.array([[-1, 1], [1, 2]]),\n {\"features\": \"random\", \"sparse\": \"auto\"},\n \"'features' has to be either 'missing-only' or 'all'\",\n ),\n (\n np.array([[-1, 1], [1, 2]]),\n np.array([[-1, 1], [1, 2]]),\n {\"features\": \"all\", \"sparse\": \"random\"},\n \"'sparse' has to be a boolean or 'auto'\",\n ),\n (\n np.array([[\"a\", \"b\"], [\"c\", \"a\"]], dtype=str),\n np.array([[\"a\", \"b\"], [\"c\", \"a\"]], dtype=str),\n {},\n \"MissingIndicator does not support data with dtype\",\n ),\n ],\n)\ndef test_missing_indicator_error(X_fit, X_trans, params, msg_err):\n indicator = MissingIndicator(missing_values=-1)\n indicator.set_params(**params)\n with pytest.raises(ValueError, match=msg_err):\n indicator.fit(X_fit).transform(X_trans)\n\n\[email protected](\n \"missing_values, dtype, arr_type\",\n [\n (np.nan, np.float64, np.array),\n (0, np.int32, np.array),\n (-1, np.int32, np.array),\n (np.nan, np.float64, sparse.csc_matrix),\n (-1, np.int32, sparse.csc_matrix),\n (np.nan, np.float64, sparse.csr_matrix),\n (-1, np.int32, sparse.csr_matrix),\n (np.nan, np.float64, sparse.coo_matrix),\n (-1, np.int32, sparse.coo_matrix),\n (np.nan, np.float64, sparse.lil_matrix),\n (-1, np.int32, sparse.lil_matrix),\n (np.nan, np.float64, 
sparse.bsr_matrix),\n (-1, np.int32, sparse.bsr_matrix),\n ],\n)\[email protected](\n \"param_features, n_features, features_indices\",\n [(\"missing-only\", 3, np.array([0, 1, 2])), (\"all\", 3, np.array([0, 1, 2]))],\n)\ndef test_missing_indicator_new(\n missing_values, arr_type, dtype, param_features, n_features, features_indices\n):\n X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]])\n X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])\n X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])\n X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])\n\n # convert the input to the right array format and right dtype\n X_fit = arr_type(X_fit).astype(dtype)\n X_trans = arr_type(X_trans).astype(dtype)\n X_fit_expected = X_fit_expected.astype(dtype)\n X_trans_expected = X_trans_expected.astype(dtype)\n\n indicator = MissingIndicator(\n missing_values=missing_values, features=param_features, sparse=False\n )\n X_fit_mask = indicator.fit_transform(X_fit)\n X_trans_mask = indicator.transform(X_trans)\n\n assert X_fit_mask.shape[1] == n_features\n assert X_trans_mask.shape[1] == n_features\n\n assert_array_equal(indicator.features_, features_indices)\n assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])\n assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])\n\n assert X_fit_mask.dtype == bool\n assert X_trans_mask.dtype == bool\n assert isinstance(X_fit_mask, np.ndarray)\n assert isinstance(X_trans_mask, np.ndarray)\n\n indicator.set_params(sparse=True)\n X_fit_mask_sparse = indicator.fit_transform(X_fit)\n X_trans_mask_sparse = indicator.transform(X_trans)\n\n assert X_fit_mask_sparse.dtype == bool\n assert X_trans_mask_sparse.dtype == bool\n assert X_fit_mask_sparse.format == \"csc\"\n assert X_trans_mask_sparse.format == \"csc\"\n assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)\n assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)\n\n\[email protected](\n \"arr_type\",\n [\n sparse.csc_matrix,\n sparse.csr_matrix,\n sparse.coo_matrix,\n sparse.lil_matrix,\n sparse.bsr_matrix,\n ],\n)\ndef test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):\n # test for sparse input and missing_value == 0\n\n missing_values = 0\n X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])\n X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])\n\n # convert the input to the right array format\n X_fit_sparse = arr_type(X_fit)\n X_trans_sparse = arr_type(X_trans)\n\n indicator = MissingIndicator(missing_values=missing_values)\n\n with pytest.raises(ValueError, match=\"Sparse input with missing_values=0\"):\n indicator.fit_transform(X_fit_sparse)\n\n indicator.fit_transform(X_fit)\n with pytest.raises(ValueError, match=\"Sparse input with missing_values=0\"):\n indicator.transform(X_trans_sparse)\n\n\[email protected](\"param_sparse\", [True, False, \"auto\"])\[email protected](\n \"missing_values, arr_type\",\n [\n (np.nan, np.array),\n (0, np.array),\n (np.nan, sparse.csc_matrix),\n (np.nan, sparse.csr_matrix),\n (np.nan, sparse.coo_matrix),\n (np.nan, sparse.lil_matrix),\n ],\n)\ndef test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse):\n # check the format of the output with different sparse parameter\n X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])\n X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])\n X_fit = arr_type(X_fit).astype(np.float64)\n X_trans = arr_type(X_trans).astype(np.float64)\n\n 
indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse)\n X_fit_mask = indicator.fit_transform(X_fit)\n X_trans_mask = indicator.transform(X_trans)\n\n if param_sparse is True:\n assert X_fit_mask.format == \"csc\"\n assert X_trans_mask.format == \"csc\"\n elif param_sparse == \"auto\" and missing_values == 0:\n assert isinstance(X_fit_mask, np.ndarray)\n assert isinstance(X_trans_mask, np.ndarray)\n elif param_sparse is False:\n assert isinstance(X_fit_mask, np.ndarray)\n assert isinstance(X_trans_mask, np.ndarray)\n else:\n if sparse.issparse(X_fit):\n assert X_fit_mask.format == \"csc\"\n assert X_trans_mask.format == \"csc\"\n else:\n assert isinstance(X_fit_mask, np.ndarray)\n assert isinstance(X_trans_mask, np.ndarray)\n\n\ndef test_missing_indicator_string():\n X = np.array([[\"a\", \"b\", \"c\"], [\"b\", \"c\", \"a\"]], dtype=object)\n indicator = MissingIndicator(missing_values=\"a\", features=\"all\")\n X_trans = indicator.fit_transform(X)\n assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]]))\n\n\[email protected](\n \"X, missing_values, X_trans_exp\",\n [\n (\n np.array([[\"a\", \"b\"], [\"b\", \"a\"]], dtype=object),\n \"a\",\n np.array([[\"b\", \"b\", True, False], [\"b\", \"b\", False, True]], dtype=object),\n ),\n (\n np.array([[np.nan, 1.0], [1.0, np.nan]]),\n np.nan,\n np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]),\n ),\n (\n np.array([[np.nan, \"b\"], [\"b\", np.nan]], dtype=object),\n np.nan,\n np.array([[\"b\", \"b\", True, False], [\"b\", \"b\", False, True]], dtype=object),\n ),\n (\n np.array([[None, \"b\"], [\"b\", None]], dtype=object),\n None,\n np.array([[\"b\", \"b\", True, False], [\"b\", \"b\", False, True]], dtype=object),\n ),\n ],\n)\ndef test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):\n trans = make_union(\n SimpleImputer(missing_values=missing_values, strategy=\"most_frequent\"),\n MissingIndicator(missing_values=missing_values),\n )\n X_trans = trans.fit_transform(X)\n assert_array_equal(X_trans, X_trans_exp)\n\n\[email protected](\"imputer_constructor\", [SimpleImputer, IterativeImputer])\[email protected](\n \"imputer_missing_values, missing_value, err_msg\",\n [\n (\"NaN\", np.nan, \"Input X contains NaN\"),\n (\"-1\", -1, \"types are expected to be both numerical.\"),\n ],\n)\ndef test_inconsistent_dtype_X_missing_values(\n imputer_constructor, imputer_missing_values, missing_value, err_msg\n):\n # regression test for issue #11390. Comparison between incoherent dtype\n # for X and missing_values was not raising a proper error.\n rng = np.random.RandomState(42)\n X = rng.randn(10, 10)\n X[0, 0] = missing_value\n\n imputer = imputer_constructor(missing_values=imputer_missing_values)\n\n with pytest.raises(ValueError, match=err_msg):\n imputer.fit_transform(X)\n\n\ndef test_missing_indicator_no_missing():\n # check that all features are dropped if there are no missing values when\n # features='missing-only' (#13491)\n X = np.array([[1, 1], [1, 1]])\n\n mi = MissingIndicator(features=\"missing-only\", missing_values=-1)\n Xt = mi.fit_transform(X)\n\n assert Xt.shape[1] == 0\n\n\ndef test_missing_indicator_sparse_no_explicit_zeros():\n # Check that non missing values don't become explicit zeros in the mask\n # generated by missing indicator when X is sparse. 
(#13491)\n X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]])\n\n mi = MissingIndicator(features=\"all\", missing_values=1)\n Xt = mi.fit_transform(X)\n\n assert Xt.getnnz() == Xt.sum()\n\n\[email protected](\"imputer_constructor\", [SimpleImputer, IterativeImputer])\ndef test_imputer_without_indicator(imputer_constructor):\n X = np.array([[1, 1], [1, 1]])\n imputer = imputer_constructor()\n imputer.fit(X)\n\n assert imputer.indicator_ is None\n\n\[email protected](\n \"arr_type\",\n [\n sparse.csc_matrix,\n sparse.csr_matrix,\n sparse.coo_matrix,\n sparse.lil_matrix,\n sparse.bsr_matrix,\n ],\n)\ndef test_simple_imputation_add_indicator_sparse_matrix(arr_type):\n X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]])\n X_true = np.array(\n [\n [3.0, 1.0, 5.0, 1.0, 0.0, 0.0],\n [2.0, 2.0, 1.0, 0.0, 1.0, 0.0],\n [6.0, 3.0, 5.0, 0.0, 0.0, 1.0],\n [1.0, 2.0, 9.0, 0.0, 0.0, 0.0],\n ]\n )\n\n imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)\n X_trans = imputer.fit_transform(X_sparse)\n\n assert sparse.issparse(X_trans)\n assert X_trans.shape == X_true.shape\n assert_allclose(X_trans.toarray(), X_true)\n\n\[email protected](\n \"strategy, expected\", [(\"most_frequent\", \"b\"), (\"constant\", \"missing_value\")]\n)\ndef test_simple_imputation_string_list(strategy, expected):\n X = [[\"a\", \"b\"], [\"c\", np.nan]]\n\n X_true = np.array([[\"a\", \"b\"], [\"c\", expected]], dtype=object)\n\n imputer = SimpleImputer(strategy=strategy)\n X_trans = imputer.fit_transform(X)\n\n assert_array_equal(X_trans, X_true)\n\n\[email protected](\n \"order, idx_order\",\n [(\"ascending\", [3, 4, 2, 0, 1]), (\"descending\", [1, 0, 2, 4, 3])],\n)\ndef test_imputation_order(order, idx_order):\n # regression test for #15393\n rng = np.random.RandomState(42)\n X = rng.rand(100, 5)\n X[:50, 1] = np.nan\n X[:30, 0] = np.nan\n X[:20, 2] = np.nan\n X[:10, 4] = np.nan\n\n with pytest.warns(ConvergenceWarning):\n trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit(\n X\n )\n idx = [x.feat_idx for x in trs.imputation_sequence_]\n assert idx == idx_order\n\n\[email protected](\"missing_value\", [-1, np.nan])\ndef test_simple_imputation_inverse_transform(missing_value):\n # Test inverse_transform feature for np.nan\n X_1 = np.array(\n [\n [9, missing_value, 3, -1],\n [4, -1, 5, 4],\n [6, 7, missing_value, -1],\n [8, 9, 0, missing_value],\n ]\n )\n\n X_2 = np.array(\n [\n [5, 4, 2, 1],\n [2, 1, missing_value, 3],\n [9, missing_value, 7, 1],\n [6, 4, 2, missing_value],\n ]\n )\n\n X_3 = np.array(\n [\n [1, missing_value, 5, 9],\n [missing_value, 4, missing_value, missing_value],\n [2, missing_value, 7, missing_value],\n [missing_value, 3, missing_value, 8],\n ]\n )\n\n X_4 = np.array(\n [\n [1, 1, 1, 3],\n [missing_value, 2, missing_value, 1],\n [2, 3, 3, 4],\n [missing_value, 4, missing_value, 2],\n ]\n )\n\n imputer = SimpleImputer(\n missing_values=missing_value, strategy=\"mean\", add_indicator=True\n )\n\n X_1_trans = imputer.fit_transform(X_1)\n X_1_inv_trans = imputer.inverse_transform(X_1_trans)\n\n X_2_trans = imputer.transform(X_2) # test on new data\n X_2_inv_trans = imputer.inverse_transform(X_2_trans)\n\n assert_array_equal(X_1_inv_trans, X_1)\n assert_array_equal(X_2_inv_trans, X_2)\n\n for X in [X_3, X_4]:\n X_trans = imputer.fit_transform(X)\n X_inv_trans = imputer.inverse_transform(X_trans)\n assert_array_equal(X_inv_trans, X)\n\n\[email protected](\"missing_value\", [-1, np.nan])\ndef 
test_simple_imputation_inverse_transform_exceptions(missing_value):\n X_1 = np.array(\n [\n [9, missing_value, 3, -1],\n [4, -1, 5, 4],\n [6, 7, missing_value, -1],\n [8, 9, 0, missing_value],\n ]\n )\n\n imputer = SimpleImputer(missing_values=missing_value, strategy=\"mean\")\n X_1_trans = imputer.fit_transform(X_1)\n with pytest.raises(\n ValueError, match=f\"Got 'add_indicator={imputer.add_indicator}'\"\n ):\n imputer.inverse_transform(X_1_trans)\n\n\[email protected](\n \"expected,array,dtype,extra_value,n_repeat\",\n [\n # array of object dtype\n (\"extra_value\", [\"a\", \"b\", \"c\"], object, \"extra_value\", 2),\n (\n \"most_frequent_value\",\n [\"most_frequent_value\", \"most_frequent_value\", \"value\"],\n object,\n \"extra_value\",\n 1,\n ),\n (\"a\", [\"min_value\", \"min_valuevalue\"], object, \"a\", 2),\n (\"min_value\", [\"min_value\", \"min_value\", \"value\"], object, \"z\", 2),\n # array of numeric dtype\n (10, [1, 2, 3], int, 10, 2),\n (1, [1, 1, 2], int, 10, 1),\n (10, [20, 20, 1], int, 10, 2),\n (1, [1, 1, 20], int, 10, 2),\n ],\n)\ndef test_most_frequent(expected, array, dtype, extra_value, n_repeat):\n assert expected == _most_frequent(\n np.array(array, dtype=dtype), extra_value, n_repeat\n )\n\n\ndef test_simple_impute_pd_na():\n pd = pytest.importorskip(\"pandas\")\n\n # Impute pandas array of string types.\n df = pd.DataFrame({\"feature\": pd.Series([\"abc\", None, \"de\"], dtype=\"string\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"constant\", fill_value=\"na\")\n _assert_array_equal_and_same_dtype(\n imputer.fit_transform(df), np.array([[\"abc\"], [\"na\"], [\"de\"]], dtype=object)\n )\n\n # Impute pandas array of string types without any missing values.\n df = pd.DataFrame({\"feature\": pd.Series([\"abc\", \"de\", \"fgh\"], dtype=\"string\")})\n imputer = SimpleImputer(fill_value=\"ok\", strategy=\"constant\")\n _assert_array_equal_and_same_dtype(\n imputer.fit_transform(df), np.array([[\"abc\"], [\"de\"], [\"fgh\"]], dtype=object)\n )\n\n # Impute pandas array of integer types.\n df = pd.DataFrame({\"feature\": pd.Series([1, None, 3], dtype=\"Int64\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"constant\", fill_value=-1)\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype=\"float64\")\n )\n\n # Use `np.nan` also works.\n imputer = SimpleImputer(missing_values=np.nan, strategy=\"constant\", fill_value=-1)\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype=\"float64\")\n )\n\n # Impute pandas array of integer types with 'median' strategy.\n df = pd.DataFrame({\"feature\": pd.Series([1, None, 2, 3], dtype=\"Int64\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"median\")\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype=\"float64\")\n )\n\n # Impute pandas array of integer types with 'mean' strategy.\n df = pd.DataFrame({\"feature\": pd.Series([1, None, 2], dtype=\"Int64\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"mean\")\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype=\"float64\")\n )\n\n # Impute pandas array of float types.\n df = pd.DataFrame({\"feature\": pd.Series([1.0, None, 3.0], dtype=\"float64\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"constant\", fill_value=-2.0)\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], 
dtype=\"float64\")\n )\n\n # Impute pandas array of float types with 'median' strategy.\n df = pd.DataFrame({\"feature\": pd.Series([1.0, None, 2.0, 3.0], dtype=\"float64\")})\n imputer = SimpleImputer(missing_values=pd.NA, strategy=\"median\")\n _assert_allclose_and_same_dtype(\n imputer.fit_transform(df),\n np.array([[1.0], [2.0], [2.0], [3.0]], dtype=\"float64\"),\n )\n\n\ndef test_missing_indicator_feature_names_out():\n \"\"\"Check that missing indicator return the feature names with a prefix.\"\"\"\n pd = pytest.importorskip(\"pandas\")\n\n missing_values = np.nan\n X = pd.DataFrame(\n [\n [missing_values, missing_values, 1, missing_values],\n [4, missing_values, 2, 10],\n ],\n columns=[\"a\", \"b\", \"c\", \"d\"],\n )\n\n indicator = MissingIndicator(missing_values=missing_values).fit(X)\n feature_names = indicator.get_feature_names_out()\n expected_names = [\"missingindicator_a\", \"missingindicator_b\", \"missingindicator_d\"]\n assert_array_equal(expected_names, feature_names)\n\n"
] | [
[
"sklearn.linear_model.RidgeCV",
"numpy.repeat",
"numpy.dot",
"sklearn.linear_model.ARDRegression",
"numpy.median",
"sklearn.impute.IterativeImputer",
"numpy.min",
"numpy.mean",
"sklearn.linear_model.BayesianRidge",
"sklearn.tree.DecisionTreeRegressor",
"numpy.random.random",
"numpy.dtype",
"numpy.max",
"sklearn.impute.SimpleImputer",
"numpy.full",
"sklearn.utils._testing.assert_array_almost_equal",
"numpy.empty",
"sklearn.datasets.load_diabetes",
"sklearn.datasets.fetch_california_housing",
"numpy.arange",
"scipy.sparse.csr_matrix",
"scipy.sparse.issparse",
"numpy.array",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.random.randn",
"numpy.all",
"sklearn.impute.MissingIndicator",
"numpy.hstack",
"sklearn.cross_decomposition.PLSRegression",
"numpy.isnan",
"numpy.random.RandomState",
"sklearn.utils._testing.assert_array_equal",
"numpy.ones",
"sklearn.random_projection._sparse_random_matrix",
"sklearn.dummy.DummyRegressor",
"scipy.stats.kstest",
"sklearn.utils._testing.assert_allclose",
"sklearn.model_selection.GridSearchCV",
"sklearn.utils._testing.assert_allclose_dense_sparse"
]
] |
ab-sin-the/RL-based-CO | [
"7972d10bac68d91e056bd8a8b842ba8ebdef188e"
] | [
"main/main.py"
] | [
"import sys\nsys.path.append('../utils')\nimport utils\nimport numpy as np\nimport random\nimport math\nfrom tqdm import tqdm\n# To read input\n#benchmark: sites.nlsde.buaa.edu.cn/~kexu/benchmarks/graph-benchmarks.htm\ndef read(file):\n with open(file, 'r') as input:\n n = int(next(input))\n E = np.zeros(shape=(n,n), dtype=np.int)\n i = 0\n for line in input:\n j = 0\n for x in line.split():\n E[i][j] = int(x)\n j = j + 1\n i = i + 1\n return (n, E)\n\n\ndef main():\n # Main\n file_path = '../data/frb35.in'\n (n, E) = read(file_path)\n E = np.array(E)\n verticles = [i for i in range(n)]\n alpha = 0.5\n beta = 0.005\n gamma = 0.01\n theta = 0.8\n\n batch_size = math.ceil(n / 5)\n batch_size = n - 1\n utils.reinforcement_learning(alpha,beta,gamma,theta,E,batch_size)\n return\n\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
yuan776/scikit-multilearn | [
"5ad32df237e6a9746fd5ec2f9543dcd011e8cdd2"
] | [
"yyskmultilearn/adapt/mltsvm.py"
] | [
"# Authors: Grzegorz Kulakowski <[email protected]>\n# License: BSD 3 clause\nfrom yyskmultilearn.base import MLClassifierBase\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.linalg import norm\nfrom scipy.sparse.linalg import inv as inv_sparse\nfrom scipy.linalg import inv as inv_dense\n\n\nclass MLTSVM(MLClassifierBase):\n \"\"\"Twin multi-Label Support Vector Machines\n\n Parameters\n ----------\n c_k : int\n the empirical risk penalty parameter that determines the trade-off between the loss terms\n sor_omega: float (default is 1.0)\n the smoothing parameter\n threshold : int (default is 1e-6)\n threshold above which a label should be assigned\n lambda_param : float (default is 1.0)\n the regularization parameter\n max_iteration : int (default is 500)\n maximum number of iterations to use in successive overrelaxation\n\n\n References\n ----------\n\n If you use this classifier please cite the original paper introducing the method:\n\n .. code :: bibtex\n\n @article{chen2016mltsvm,\n title={MLTSVM: a novel twin support vector machine to multi-label learning},\n author={Chen, Wei-Jie and Shao, Yuan-Hai and Li, Chun-Na and Deng, Nai-Yang},\n journal={Pattern Recognition},\n volume={52},\n pages={61--74},\n year={2016},\n publisher={Elsevier}\n }\n\n\n Examples\n --------\n\n Here's a very simple example of using MLTSVM with a fixed number of neighbors:\n\n .. code :: python\n\n from yyskmultilearn.adapt import MLTSVM\n\n classifier = MLTSVM(c_k = 2**-1)\n\n # train\n classifier.fit(X_train, y_train)\n\n # predict\n predictions = classifier.predict(X_test)\n\n\n You can also use :class:`~sklearn.model_selection.GridSearchCV` to find an optimal set of parameters:\n\n .. code :: python\n\n from yyskmultilearn.adapt import MLTSVM\n from sklearn.model_selection import GridSearchCV\n\n parameters = {'c_k': [2**i for i in range(-5, 5, 2)]}\n score = 'f1-macro\n\n clf = GridSearchCV(MLTSVM(), parameters, scoring=score)\n clf.fit(X, y)\n\n print (clf.best_params_, clf.best_score_)\n\n # output\n {'c_k': 0.03125} 0.347518217573\n\n\n \"\"\"\n\n def __init__(self, c_k=0, sor_omega=1.0, threshold=1e-6, lambda_param=1.0, max_iteration=500):\n super(MLClassifierBase, self).__init__()\n self.max_iteration = max_iteration\n self.threshold = threshold\n self.lambda_param = lambda_param # TODO: possibility to add different lambda to different labels\n self.c_k = c_k\n self.sor_omega = sor_omega\n self.copyable_attrs = ['c_k', 'sor_omega', 'lambda_param', 'threshold', 'max_iteration']\n\n def fit(self, X, Y):\n n_labels = Y.shape[1]\n m = X.shape[1] # Count of features\n self.wk_bk = np.zeros([n_labels, m + 1], dtype=float)\n\n if sp.issparse(X):\n identity_matrix = sp.identity(m + 1)\n _inv = inv_sparse\n else:\n identity_matrix = np.identity(m + 1)\n _inv = inv_dense\n\n X_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))\n self.iteration_count = []\n for label in range(0, n_labels):\n # Calculate the parameter Q for overrelaxation\n H_k = _get_x_class_instances(X_bias, Y, label)\n G_k = _get_x_noclass_instances(X_bias, Y, label)\n Q_knoPrefixGk = _inv((H_k.T).dot(H_k) + self.lambda_param * identity_matrix).dot(G_k.T)\n Q_k = G_k.dot(Q_knoPrefixGk).A\n Q_k = (Q_k + Q_k.T) / 2.0\n\n # Calculate other\n alpha_k = self._successive_overrelaxation(self.sor_omega, Q_k)\n if sp.issparse(X):\n self.wk_bk[label] = -Q_knoPrefixGk.dot(alpha_k).T\n else:\n self.wk_bk[label] = (-np.dot(Q_knoPrefixGk, alpha_k)).T\n\n self.wk_norms = norm(self.wk_bk, axis=1)\n self.treshold = 1.0 / 
np.max(self.wk_norms)\n\n def predict(self, X):\n X_with_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))\n wk_norms_multiplicated = self.wk_norms[np.newaxis, :] # change to form [[wk1, wk2, ..., wkk]]\n all_distances = (-X_with_bias.dot(self.wk_bk.T)) / wk_norms_multiplicated\n predicted_y = np.where(all_distances < self.treshold, 1, 0)\n # TODO: It's possible to add condition to: add label if no labels is in row.\n return predicted_y\n\n def _successive_overrelaxation(self, omegaW, Q):\n # Initialization\n D = np.diag(Q) # Only one dimension vector - is enough\n D_inv = 1.0 / D # D-1 simplify form\n small_l = Q.shape[1]\n oldnew_alpha = np.zeros([small_l, 1]) # buffer\n\n is_not_enough = True\n was_going_down = False\n last_alfa_norm_change = -1\n\n nr_iter = 0\n while is_not_enough: # do while\n oldAlpha = oldnew_alpha\n for j in range(0, small_l): # It's from last alpha to first\n oldnew_alpha[j] = oldAlpha[j] - omegaW * D_inv[j] * (Q[j, :].T.dot(oldnew_alpha) - 1)\n oldnew_alpha = oldnew_alpha.clip(0.0, self.c_k)\n alfa_norm_change = norm(oldnew_alpha - oldAlpha)\n\n if not was_going_down and last_alfa_norm_change > alfa_norm_change:\n was_going_down = True\n is_not_enough = alfa_norm_change > self.threshold and \\\n nr_iter < self.max_iteration \\\n and ((not was_going_down) or last_alfa_norm_change > alfa_norm_change)\n # TODO: maybe add any(oldnew_alpha != oldAlpha)\n\n last_alfa_norm_change = alfa_norm_change\n nr_iter += 1\n self.iteration_count.append(nr_iter)\n return oldnew_alpha\n\n\ndef _get_x_noclass_instances(X, Y, label_class):\n if sp.issparse(Y):\n indices = np.where(Y[:, 1].A == 0)[0]\n else:\n indices = np.where(Y[:, 1] == 0)[0]\n return X[indices, :]\n\n\ndef _get_x_class_instances(X, Y, label_class):\n if sp.issparse(Y):\n indices = Y[:, label_class].nonzero()[0]\n else:\n indices = np.nonzero(Y[:, label_class])[0]\n return X[indices, :]\n\n\ndef _hstack(X, Y):\n if sp.issparse(X):\n return sp.hstack([X, Y], format=X.format)\n else:\n return np.hstack([X, Y])\n"
] | [
[
"numpy.max",
"scipy.sparse.issparse",
"numpy.dot",
"numpy.zeros",
"scipy.sparse.hstack",
"numpy.ones",
"numpy.nonzero",
"numpy.identity",
"numpy.where",
"scipy.sparse.identity",
"numpy.hstack",
"scipy.linalg.norm",
"numpy.diag"
]
] |
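The MLTSVM implementation in the row above solves each label's dual problem with a clipped successive-overrelaxation loop (its _successive_overrelaxation method). The following is a minimal, self-contained NumPy sketch of that projected SOR update on a box-constrained quadratic program; the matrix Q, the bound c_k, and the tolerance below are made-up illustration values, not taken from the row above.

import numpy as np

def sor_box_qp(Q, c_k, omega=1.0, tol=1e-6, max_iter=500):
    """Projected SOR for min 0.5*a^T Q a - 1^T a  subject to  0 <= a <= c_k."""
    n = Q.shape[0]
    alpha = np.zeros(n)
    d_inv = 1.0 / np.diag(Q)                 # reciprocal of the diagonal of Q
    for _ in range(max_iter):
        old = alpha.copy()
        for j in range(n):                   # coordinate-wise relaxation sweep
            alpha[j] -= omega * d_inv[j] * (Q[j, :] @ alpha - 1.0)
            alpha[j] = min(max(alpha[j], 0.0), c_k)   # project onto the box
        if np.linalg.norm(alpha - old) < tol:
            break
    return alpha

rng = np.random.default_rng(0)
A = rng.normal(size=(6, 6))
Q = A @ A.T + 6 * np.eye(6)                  # small random positive-definite matrix
print(sor_box_qp(Q, c_k=0.5))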
HeRCLab/tvm | [
"bd14a4d36e0d364ef9bd34b2ee96cc09ce64d4b3"
] | [
"python/tvm/contrib/cudnn.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"External function interface to CuDNN v7 library.\"\"\"\n# pylint: disable-msg=C0103\nimport ctypes\nimport numpy as np\nimport tvm\n\nimport tvm._ffi\nfrom tvm import te\n\n# algos can be read from cudnn.h\n_FWD_ALGOS = [\n \"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_FFT\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_COUNT\",\n]\n\n\ndef exists():\n \"\"\"\n Checks whether the local machine can use CuDNN.\n\n Returns\n -------\n exists: bool\n\n True if CuDNN support is enabled and a CuDNN-capable GPU\n exists. Otherwise, False.\n \"\"\"\n func = tvm.get_global_func(\"tvm.contrib.cudnn.exists\", allow_missing=True)\n if func is None:\n return False\n\n return bool(func())\n\n\ndef algo_to_index(algo_type, algo_name):\n \"\"\"Return a index represents the algorithm, which can be used in\n calling CuDNN function\n\n Parameters\n ----------\n algo_type : str\n [\"fwd\", \"bwd_filter\", \"bwd_data]\n\n algo_name : str\n algorithm name in cudnn definition\n fwd = [\n \"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_FFT\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_COUNT\",\n ]\n bwd_filter = [\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0\",\n # non-deterministic\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1\",\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT\",\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3\",\n # non-deterministic, algo0 with workspaceS\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD\",\n # not implemented\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED\",\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING\",\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT\",\n ]\n bwd_data = [\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_0\",\n # non-deterministic\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT\",\n ]\n\n Returns\n -------\n algo: int\n algorithm index\n\n \"\"\"\n idx = -1\n if algo_type == \"fwd\":\n idx = _FWD_ALGOS.index(algo_name)\n elif algo_type == \"bwd_filter\":\n idx = 
_BWD_FILTER_ALGOS.index(algo_name)\n elif algo_type == \"bwd_data\":\n idx = _BWD_DATA_ALGOS.index(algo_name)\n assert idx >= 0\n return idx\n\n\ndef _get_np_int32_array_handle(arr):\n \"\"\"Return a void_p handle for a numpy array\n\n Parameters\n ----------\n arr: numpy.NDArray\n source numpy array\n\n Returns\n -------\n ptr: ctypes.c_void_p\n pointer to the data\n \"\"\"\n assert arr.dtype == np.int32\n ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))\n return ctypes.cast(ptr, ctypes.c_void_p)\n\n\ndef _prepare_global_func_params(dims, pad, stride, dilation, x_shape=None, w_shape=None):\n full_dims = dims + 2\n if x_shape:\n assert isinstance(x_shape, list)\n assert len(x_shape) == full_dims\n if w_shape:\n assert isinstance(w_shape, list)\n assert len(w_shape) == full_dims\n\n pad = (\n np.full(dims, pad, dtype=np.int32)\n if isinstance(pad, int)\n else np.array(pad, dtype=np.int32)\n )\n stride = (\n np.full(dims, stride, dtype=np.int32)\n if isinstance(stride, int)\n else np.array(stride, dtype=np.int32)\n )\n dilation = (\n np.full(dims, dilation, dtype=np.int32)\n if isinstance(dilation, int)\n else np.array(dilation, dtype=np.int32)\n )\n\n xshape = np.array(x_shape, dtype=np.int32) if x_shape else None\n wshape = np.array(w_shape, dtype=np.int32) if x_shape else None\n\n return pad, stride, dilation, xshape, wshape\n\n\ndef conv_output_shape(\n tensor_format, pad, stride, dilation, x_shape, w_shape, data_dtype, conv_dtype, groups=1\n):\n \"\"\"Get output shape of 2D or 3D convolution\n\n Paramters\n ---------\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n 2: CUDNN_TENSOR_NCHW_VECT_C\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n x_shape: list\n input shape\n w_shape: list\n weight shape\n data_dtype: str\n data type\n conv_dtype: str\n convolution type\n groups: int\n number of groups\n\n Returns\n -------\n oshape: list\n output shape\n \"\"\"\n\n assert len(x_shape) == len(w_shape)\n assert len(x_shape) in (4, 5)\n\n if tensor_format == 0:\n n_output = x_shape[0]\n c_output = w_shape[0]\n x_chan = x_shape[1]\n w_chan_input = w_shape[1]\n x_shape = x_shape[2:]\n w_shape = w_shape[2:]\n\n elif tensor_format == 1:\n n_output = x_shape[0]\n c_output = w_shape[0]\n x_chan = x_shape[-1]\n w_chan_input = w_shape[-1]\n assert len(x_shape) == 4, \"CuDNN layout NHWC is only well-defined for 4d tensors\"\n x_shape = x_shape[1:-1]\n w_shape = w_shape[1:-1]\n\n elif tensor_format == 2:\n n_output = x_shape[0]\n c_output = w_shape[0]\n x_chan = x_shape[1]\n w_chan_input = w_shape[1]\n w_lanes = tvm.runtime.DataType(conv_dtype).lanes\n assert w_lanes == 1\n x_shape = x_shape[2:]\n w_shape = w_shape[2:]\n\n else:\n raise ValueError(\"Unknown CuDNN tensor format: '{}'\".format(tensor_format))\n\n x_lanes = tvm.runtime.DataType(data_dtype).lanes\n assert x_chan * x_lanes == w_chan_input * groups, (\n \"Mismatched dimensions, data has {} channels/group \"\n \"(dimension {} with {} lanes/value, {} groups), \"\n \"but weights require {} input channels/group\"\n ).format(x_chan // groups, x_chan, x_lanes, groups, w_chan_input)\n\n output_dims = []\n for x_shape_i, w_shape_i, pad_i, stride_i, dilation_i in zip(\n x_shape, w_shape, pad, stride, dilation\n ):\n output_dim = 1 + (x_shape_i + 2 * pad_i - (((w_shape_i - 1) * dilation_i) + 1)) // stride_i\n output_dims.append(output_dim)\n\n if tensor_format in [0, 2]:\n output = [n_output, c_output, *output_dims]\n elif tensor_format == 1:\n output = [n_output, *output_dims, 
c_output]\n else:\n raise ValueError(\"Unknown CuDNN tensor format: '{}'\".format(tensor_format))\n\n return output\n\n\ndef conv_dgrad_shape(\n tensor_format, pad, stride, dilation, dy_shape, w_shape, output_padding=(0, 0), groups=1\n):\n \"\"\"Get output shape of conv2d gradient with respect to data\n\n Paramters\n ---------\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n dy_shape: list\n output gradient shape\n w_shape: list\n weight shape\n data_dtype: str\n data type\n conv_dtype: str\n convolution type\n groups: int\n number of groups\n\n Returns\n -------\n oshape: list\n output shape\n \"\"\"\n\n assert len(dy_shape) == len(w_shape)\n assert len(dy_shape) == 4\n\n if tensor_format == 0:\n N = dy_shape[0]\n C = w_shape[1] * groups\n dy_shape = dy_shape[2:]\n w_shape = w_shape[2:]\n elif tensor_format == 1:\n N = dy_shape[0]\n C = w_shape[-1] * groups\n dy_shape = dy_shape[1:-1]\n w_shape = w_shape[1:-1]\n else:\n raise ValueError(\"Unsupported CuDNN tensor format: '{}'\".format(tensor_format))\n\n input_dims = []\n for dy_shape_i, w_shape_i, pad_i, stride_i, dilation_i, out_pad in zip(\n dy_shape, w_shape, pad, stride, dilation, output_padding\n ):\n input_dim = (\n (dy_shape_i - 1) * stride_i - 2 * pad_i + (((w_shape_i - 1) * dilation_i) + 1) + out_pad\n )\n input_dims.append(input_dim)\n\n if tensor_format == 0:\n output = [N, C, *input_dims]\n else:\n output = [N, *input_dims, C]\n\n return output\n\n\ndef _conv_find_algo(\n func_name,\n tensor_format,\n pad,\n stride,\n dilation,\n x_shape,\n w_shape,\n y_shape,\n data_dtype,\n conv_dtype,\n groups=1,\n):\n \"\"\"\n Common function to choose the best cudnn convolution algorithm for the given input\n and the convolution type.\n \"\"\"\n dims = len(x_shape)\n assert dims in (4, 5)\n\n pad, stride, dilation, xshape, wshape = _prepare_global_func_params(\n dims - 2, pad, stride, dilation, x_shape, w_shape\n )\n yshape = np.array(y_shape, dtype=np.int32)\n func = tvm._ffi.get_global_func(func_name)\n return func(\n tensor_format,\n dims - 2,\n _get_np_int32_array_handle(pad),\n _get_np_int32_array_handle(stride),\n _get_np_int32_array_handle(dilation),\n _get_np_int32_array_handle(xshape),\n _get_np_int32_array_handle(wshape),\n _get_np_int32_array_handle(yshape),\n data_dtype,\n conv_dtype,\n groups,\n )\n\n\ndef conv_forward_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n x_shape,\n w_shape,\n y_shape,\n data_dtype,\n conv_dtype,\n groups=1,\n):\n \"\"\"Choose the best forward algorithm for the given input.\n\n Paramters\n ---------\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n 2: CUDNN_TENSOR_NCHW_VECT_C\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n x_shape: list\n input shape\n w_shape: list\n weight shape\n y_shape: list\n output shape\n data_dtype: str\n data type\n conv_dtype: str\n convolution type\n groups: int\n number of groups\n\n Returns\n -------\n algo: int\n algo chosen by CUDNN\n \"\"\"\n return _conv_find_algo(\n \"tvm.contrib.cudnn.conv.forward_find_algo\",\n tensor_format,\n pad,\n stride,\n dilation,\n x_shape,\n w_shape,\n y_shape,\n data_dtype,\n conv_dtype,\n groups,\n )\n\n\ndef conv_backward_data_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n dy_shape,\n w_shape,\n dx_shape,\n data_dtype,\n conv_dtype,\n groups=1,\n):\n \"\"\"Choose the best backward data algorithm for the given input.\n\n 
Paramters\n ---------\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n 2: CUDNN_TENSOR_NCHW_VECT_C\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n dy_shape: list\n output gradient shape\n w_shape: list\n weight shape\n dx_shape: list\n dgrad shape\n data_dtype: str\n data type\n conv_dtype: str\n convolution type\n groups: int\n number of groups\n\n Returns\n -------\n algo: int\n algo chosen by CUDNN\n \"\"\"\n return _conv_find_algo(\n \"tvm.contrib.cudnn.conv.backward_data_find_algo\",\n tensor_format,\n pad,\n stride,\n dilation,\n dy_shape,\n w_shape,\n dx_shape,\n data_dtype,\n conv_dtype,\n groups,\n )\n\n\ndef conv_backward_filter_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n dy_shape,\n x_shape,\n dw_shape,\n data_dtype,\n conv_dtype,\n groups=1,\n):\n \"\"\"Choose the best backward filter algorithm for the given input.\n\n Paramters\n ---------\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n 2: CUDNN_TENSOR_NCHW_VECT_C\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n dy_shape: list\n output gradient shape\n x_shape: list\n weight shape\n dw_shape: list\n wgrad shape\n data_dtype: str\n data type\n conv_dtype: str\n convolution type\n groups: int\n number of groups\n\n Returns\n -------\n algo: int\n algo chosen by CUDNN\n \"\"\"\n return _conv_find_algo(\n \"tvm.contrib.cudnn.conv.backward_filter_find_algo\",\n tensor_format,\n pad,\n stride,\n dilation,\n dy_shape,\n x_shape,\n dw_shape,\n data_dtype,\n conv_dtype,\n groups,\n )\n\n\ndef conv_forward(x, w, pad, stride, dilation, conv_mode, tensor_format, algo, conv_dtype, groups=1):\n \"\"\"Create an extern op that compute 2D or 3D convolution with CuDNN\n\n Parameters\n ----------\n x: Tensor\n input feature map\n w: Tensor\n convolution weight\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n conv_mode: int\n 0: CUDNN_CONVOLUTION\n 1: CUDNN_CROSS_CORRELATION\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n 2: CUDNN_TENSOR_NCHW_VECT_C\n algo: int\n Forward algorithm, get index from ```algo_to_index``` function\n if algo == -1, the best algo will be chosen by CUDNN\n conv_dtype: str\n convolution type\n groups: int\n the number of groups\n\n Returns\n -------\n y: Tensor\n The result tensor\n \"\"\"\n dims = len(x.shape)\n assert dims in (4, 5)\n\n conv_dtype = x.dtype if conv_dtype is None else conv_dtype\n pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)\n\n x_shape = list(x.shape)\n\n if isinstance(x.shape[0], tvm.tir.expr.IntImm):\n oshape = conv_output_shape(\n tensor_format,\n pad,\n stride,\n dilation,\n x_shape,\n list(w.shape),\n x.dtype,\n conv_dtype,\n groups,\n )\n if algo == -1:\n # For now if we try to call `cudnnFindConvolutionForwardAlgorithm` when\n # using INT8 data type, CuDNN will crash down.\n # On the other hand, CuDNN only support IMPLICIT_PRECOMP_GEMM at NHWC format\n if tensor_format == 1 and conv_dtype == \"int32\":\n algo = 1\n else:\n algo = conv_forward_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n list(x.shape),\n list(w.shape),\n oshape,\n x.dtype,\n conv_dtype,\n groups,\n )\n else:\n # The dynamic batch size case, pretend this is a single batch\n x_shape[0] = 1\n oshape = conv_output_shape(\n tensor_format,\n pad,\n stride,\n dilation,\n x_shape,\n list(w.shape),\n x.dtype,\n conv_dtype,\n groups,\n )\n oshape[0] = x.shape[0]\n 
# This picks CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\n # It seems this is the fastest among algorithms that are always applicable\n algo = 1\n\n if dims == 4:\n return te.extern(\n oshape,\n [x, w],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.conv2d.forward\",\n conv_mode,\n tensor_format,\n algo,\n pad[0],\n pad[1],\n stride[0],\n stride[1],\n dilation[0],\n dilation[1],\n ins[0],\n ins[1],\n outs[0],\n conv_dtype,\n groups,\n ),\n name=\"y\",\n )\n\n return te.extern(\n oshape,\n [x, w],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.conv3d.forward\",\n conv_mode,\n tensor_format,\n algo,\n pad[0],\n pad[1],\n pad[2],\n stride[0],\n stride[1],\n stride[2],\n dilation[0],\n dilation[1],\n dilation[2],\n ins[0],\n ins[1],\n outs[0],\n conv_dtype,\n groups,\n ),\n name=\"y\",\n )\n\n\ndef conv_backward_data(\n dy,\n w,\n pad,\n stride,\n dilation,\n conv_mode,\n tensor_format,\n conv_dtype,\n groups=1,\n output_padding=(0, 0),\n):\n \"\"\"Create a CuDNN extern op that computes the gradient of 2D convolution with respect to data.\n\n Parameters\n ----------\n dy: Tensor\n output gradient\n w: Tensor\n convolution weight\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n conv_mode: int\n 0: CUDNN_CONVOLUTION\n 1: CUDNN_CROSS_CORRELATION\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n conv_dtype: str\n convolution type\n groups: int\n the number of groups\n\n Returns\n -------\n dx: Tensor\n dgrad tensor\n \"\"\"\n dims = len(dy.shape)\n assert dims == 4\n\n conv_dtype = dy.dtype if conv_dtype is None else conv_dtype\n pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)\n\n assert isinstance(\n dy.shape[0], tvm.tir.expr.IntImm\n ), \"Dynamic batch is not supported for cudnn conv2d backwad data yet.\"\n\n dx_shape = conv_dgrad_shape(\n tensor_format, pad, stride, dilation, dy.shape, w.shape, output_padding, groups\n )\n\n algo = conv_backward_data_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n list(dy.shape),\n list(w.shape),\n dx_shape,\n dy.dtype,\n conv_dtype,\n groups,\n )\n\n return te.extern(\n dx_shape,\n [dy, w],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.conv2d.backward_data\",\n conv_mode,\n tensor_format,\n algo,\n pad[0],\n pad[1],\n stride[0],\n stride[1],\n dilation[0],\n dilation[1],\n ins[0],\n ins[1],\n outs[0],\n conv_dtype,\n groups,\n ),\n name=\"dx\",\n )\n\n\ndef conv_backward_filter(\n dy, x, kernel_size, pad, stride, dilation, conv_mode, tensor_format, conv_dtype, groups=1\n):\n \"\"\"Create a CuDNN extern op that computes the gradient of 2D convolution with respect to weight.\n\n Parameters\n ----------\n dy: Tensor\n output gradient\n x: Tensor\n input tensor\n kernel_size: a pair of int\n The spatial size of the corresponding forward convolution kernel\n pad: int or list\n padding\n stride: int or list\n stride\n dilation: int or list\n dilation\n conv_mode: int\n 0: CUDNN_CONVOLUTION\n 1: CUDNN_CROSS_CORRELATION\n tensor_format: int\n 0: CUDNN_TENSOR_NCHW\n 1: CUDNN_TENSOR_NHWC\n conv_dtype: str\n convolution type\n groups: int\n the number of groups\n\n Returns\n -------\n dw: Tensor\n wgrad tensor\n \"\"\"\n dims = len(x.shape)\n assert dims == 4\n\n conv_dtype = x.dtype if conv_dtype is None else conv_dtype\n pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)\n filter_h, filter_w = kernel_size\n\n x_shape = list(x.shape)\n\n assert isinstance(\n 
x.shape[0], tvm.tir.expr.IntImm\n ), \"Dynamic batch is not supported for cudnn conv2d backwad filter yet.\"\n\n ic_ind = 1 if tensor_format == 0 else 3\n\n if groups > 1:\n assert (\n x_shape[ic_ind] == dy.shape[ic_ind] and x_shape[ic_ind] == groups\n ), \"Only depthwise wgrad supported for groups > 1.\"\n ic = 1\n else:\n ic = x_shape[ic_ind]\n\n if tensor_format == 0:\n dw_shape = [dy.shape[1], ic, filter_h, filter_w]\n else:\n dw_shape = [dy.shape[3], filter_h, filter_w, ic]\n\n algo = conv_backward_filter_find_algo(\n tensor_format,\n pad,\n stride,\n dilation,\n list(dy.shape),\n list(x.shape),\n dw_shape,\n x.dtype,\n conv_dtype,\n groups,\n )\n\n return te.extern(\n dw_shape,\n [dy, x],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.conv2d.backward_filter\",\n conv_mode,\n tensor_format,\n algo,\n pad[0],\n pad[1],\n stride[0],\n stride[1],\n dilation[0],\n dilation[1],\n ins[0],\n ins[1],\n outs[0],\n conv_dtype,\n groups,\n ),\n name=\"dw\",\n )\n\n\ndef softmax(x, axis=-1):\n \"\"\"Compute softmax using CuDNN\n\n Parameters\n ----------\n x : tvm.te.Tensor\n The input tensor\n\n axis : int\n The axis to compute the softmax\n\n Returns\n -------\n ret : tvm.te.Tensor\n The result tensor\n \"\"\"\n return te.extern(\n x.shape,\n [x],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.softmax.forward\", ins[0], outs[0], axis\n ),\n name=\"y\",\n )\n\n\ndef log_softmax(x, axis=-1):\n \"\"\"Compute log_softmax using CuDNN\n\n Parameters\n ----------\n x : tvm.te.Tensor\n The input tensor\n\n axis : int\n The axis to compute log softmax over\n\n Returns\n -------\n ret : tvm.te.Tensor\n The result tensor\n \"\"\"\n return te.extern(\n x.shape,\n [x],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cudnn.log_softmax.forward\", ins[0], outs[0], axis\n ),\n name=\"y\",\n )\n"
] | [
[
"numpy.full",
"numpy.array"
]
] |
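The conv_output_shape helper in the cuDNN wrapper above reduces, per spatial axis, to the standard convolution output-size formula. A dependency-free sketch of that arithmetic (the input size, kernel, padding, stride, and dilation below are arbitrary example values, not cuDNN defaults):

def conv_out_dim(x, k, pad, stride, dilation):
    # effective kernel extent after dilation, then the usual floor division
    effective_k = (k - 1) * dilation + 1
    return 1 + (x + 2 * pad - effective_k) // stride

# NCHW example: 224x224 input, 3x3 kernel, pad 1, stride 2, dilation 1
spatial = [conv_out_dim(224, 3, 1, 2, 1) for _ in range(2)]
print(spatial)   # [112, 112]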
cor-e-software/nbeatsx | [
"6e0f6dd7c9ff196ac9b71f059fc52c2df3adfb56"
] | [
"src/utils/pytorch/ts_loader.py"
] | [
"import numpy as np\nimport pandas as pd\nimport random\nimport torch as t\nimport copy\nfrom src.utils.pytorch.ts_dataset import TimeSeriesDataset\nfrom collections import defaultdict\n\n\nclass TimeSeriesLoader(object):\n def __init__(self,\n ts_dataset:TimeSeriesDataset,\n model:str,\n offset:int,\n window_sampling_limit: int,\n input_size: int,\n output_size: int,\n idx_to_sample_freq: int,\n batch_size: int,\n is_train_loader: bool,\n shuffle:bool):\n \"\"\"\n Time Series Loader object, used to sample time series from TimeSeriesDataset object.\n Parameters\n ----------\n ts_dataset: TimeSeriesDataset\n Time Series Dataet object which contains data in PyTorch tensors optimized for sampling.\n model: str ['nbeats']\n Model which will use the loader, affects the way of constructing batches.\n offset: int\n Equivalent to timestamps in test (data in test will not be sampled). It is used to filter\n the PyTorch tensor containing the time series, to avoid using the future during training.\n window_sampling_limit: int\n Equivalent to calibration window. Length of the history (prior to offset) which will be sampled\n input_size: int\n Size of inputs of each window (only for NBEATS), eg. 7 days\n ouput_size: int\n Forecasting horizon\n idx_to_sample_freq: int\n Frequency of sampling. Eg: 1 for data_augmentation, 24 for sampling only at 12:00am\n batch_size: int\n Number of batches (windows) to sample\n is_train_loader: bool\n True: will only sample time stamps with 1s in mask, False: will only sample time stamps with 0s in mask\n shuffle: bool\n Indicates if windows should be shuffled. True is used for training and False for predicting.\n \"\"\"\n # Dataloader attributes\n self.model = model\n self.window_sampling_limit = window_sampling_limit\n self.input_size = input_size\n self.output_size = output_size\n self.batch_size = batch_size\n self.idx_to_sample_freq = idx_to_sample_freq\n self.offset = offset\n self.ts_dataset = ts_dataset\n self.t_cols = self.ts_dataset.t_cols\n self.is_train_loader = is_train_loader # Boolean variable for train and validation mask\n self.shuffle = shuffle # Boolean to shuffle data, useful for validation\n\n # Create rolling window matrix in advanced for faster access to data and broadcasted s_matrix\n self._create_train_data()\n\n def _update_sampling_windows_idxs(self):\n\n # Only sample during training windows with at least one active output mask and input mask\n outsample_condition = t.sum(self.ts_windows[:, self.t_cols.index('outsample_mask'), -self.output_size:], axis=1)\n insample_condition = t.sum(self.ts_windows[:, self.t_cols.index('insample_mask'), :self.input_size], axis=1)\n sampling_idx = t.nonzero(outsample_condition * insample_condition > 0) #element-wise product\n sampling_idx = list(sampling_idx.flatten().numpy())\n return sampling_idx\n\n def _create_windows_tensor(self):\n \"\"\"\n Comment here\n TODO: Cuando creemos el otro dataloader, si es compatible lo hacemos funcion transform en utils\n \"\"\"\n # Memory efficiency is gained from keeping across dataloaders common ts_tensor in dataset\n # Filter function is used to define train tensor and validation tensor with the offset\n # Default ts_idxs=ts_idxs sends all the data\n tensor, right_padding, train_mask = self.ts_dataset.get_filtered_ts_tensor(offset=self.offset, output_size=self.output_size,\n window_sampling_limit=self.window_sampling_limit)\n tensor = t.Tensor(tensor)\n train_mask = t.Tensor(train_mask)\n\n # Outsample mask checks existance of values in ts, train_mask mask is used to 
filter out validation\n # is_train_loader inverts the train_mask in case the dataloader is in validation mode\n mask = train_mask if self.is_train_loader else (1 - train_mask)\n tensor[:, self.t_cols.index('outsample_mask'), :] = tensor[:, self.t_cols.index('outsample_mask'), :] * mask\n\n padder = t.nn.ConstantPad1d(padding=(self.input_size, right_padding), value=0)\n tensor = padder(tensor)\n\n # Last output_size outsample_mask and y to 0\n tensor[:, self.t_cols.index('y'), -self.output_size:] = 0 # overkill to ensure no validation leakage\n tensor[:, self.t_cols.index('outsample_mask'), -self.output_size:] = 0\n\n # Creating rolling windows and 'flattens' them\n windows = tensor.unfold(dimension=-1, size=self.input_size + self.output_size, step=self.idx_to_sample_freq)\n # n_serie, n_channel, n_time, window_size -> n_serie, n_time, n_channel, window_size\n #print(f'n_serie, n_channel, n_time, window_size = {windows.shape}')\n windows = windows.permute(0,2,1,3)\n #print(f'n_serie, n_time, n_channel, window_size = {windows.shape}')\n windows = windows.reshape(-1, self.ts_dataset.n_channels, self.input_size + self.output_size)\n\n # Broadcast s_matrix: This works because unfold in windows_tensor, orders: time, serie\n s_matrix = self.ts_dataset.s_matrix.repeat(repeats=int(len(windows)/self.ts_dataset.n_series), axis=0)\n\n return windows, s_matrix\n\n def __len__(self):\n return len(self.len_series)\n\n def __iter__(self):\n if self.shuffle:\n sample_idxs = np.random.choice(a=self.windows_sampling_idx,\n size=len(self.windows_sampling_idx), replace=False)\n else:\n sample_idxs = self.windows_sampling_idx\n\n assert len(sample_idxs)>0, 'Check the data as sample_idxs are empty'\n\n n_batches = int(np.ceil(len(sample_idxs) / self.batch_size)) # Must be multiple of batch_size for paralel gpu\n\n for idx in range(n_batches):\n ws_idxs = sample_idxs[(idx * self.batch_size) : (idx + 1) * self.batch_size]\n batch = self.__get_item__(index=ws_idxs)\n yield batch\n\n def __get_item__(self, index):\n if self.model == 'nbeats':\n return self._nbeats_batch(index)\n elif self.model == 'esrnn':\n assert 1<0, 'hacer esrnn'\n else:\n assert 1<0, 'error'\n\n def _nbeats_batch(self, index):\n # Access precomputed rolling window matrix (RAM intensive)\n windows = self.ts_windows[index]\n s_matrix = self.s_matrix[index]\n\n insample_y = windows[:, self.t_cols.index('y'), :self.input_size]\n insample_x = windows[:, (self.t_cols.index('y')+1):self.t_cols.index('insample_mask'), :self.input_size]\n insample_mask = windows[:, self.t_cols.index('insample_mask'), :self.input_size]\n\n outsample_y = windows[:, self.t_cols.index('y'), self.input_size:]\n outsample_x = windows[:, (self.t_cols.index('y')+1):self.t_cols.index('insample_mask'), self.input_size:]\n outsample_mask = windows[:, self.t_cols.index('outsample_mask'), self.input_size:]\n\n batch = {'s_matrix': s_matrix,\n 'insample_y': insample_y, 'insample_x':insample_x, 'insample_mask':insample_mask,\n 'outsample_y': outsample_y, 'outsample_x':outsample_x, 'outsample_mask':outsample_mask}\n return batch\n\n def _create_train_data(self):\n \"\"\"\n \"\"\"\n # Create rolling window matrix for fast information retrieval\n self.ts_windows, self.s_matrix = self._create_windows_tensor()\n self.n_windows = len(self.ts_windows)\n self.windows_sampling_idx = self._update_sampling_windows_idxs()\n\n def update_offset(self, offset):\n if offset == self.offset:\n return # Avoid extra computation\n self.offset = offset\n self._create_train_data()\n\n def 
get_meta_data_col(self, col):\n return self.ts_dataset.get_meta_data_col(col)\n\n def get_n_variables(self):\n return self.ts_dataset.n_x, self.ts_dataset.n_s\n\n def get_n_series(self):\n return self.ts_dataset.n_series\n\n def get_max_len(self):\n return self.ts_dataset.max_len\n\n def get_n_channels(self):\n return self.ts_dataset.n_channels\n\n def get_X_cols(self):\n return self.ts_dataset.X_cols\n\n def get_frequency(self):\n return self.ts_dataset.frequency"
] | [
[
"torch.nonzero",
"torch.nn.ConstantPad1d",
"torch.Tensor"
]
] |
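The TimeSeriesLoader above builds its sample pool with Tensor.unfold and then permutes and reshapes the result into flat rolling windows. A minimal PyTorch sketch of that rolling-window trick, assuming PyTorch is installed; the series length, window sizes, and step are illustrative values, not the loader's defaults.

import torch as t

series = t.arange(10.0).reshape(1, 1, 10)           # (n_series, n_channels, n_time)
input_size, output_size, step = 3, 2, 1

windows = series.unfold(dimension=-1, size=input_size + output_size, step=step)
# (n_series, n_channels, n_windows, window) -> flatten series/time into one batch axis
windows = windows.permute(0, 2, 1, 3).reshape(-1, 1, input_size + output_size)
print(windows.shape)        # torch.Size([6, 1, 5])
print(windows[0, 0])        # tensor([0., 1., 2., 3., 4.])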
bioidiap/bob.ip.tensorflow_extractor | [
"14ab1f878a352e1075c31d94c715b4f7556e7afb"
] | [
"bob/ip/tensorflow_extractor/MTCNN.py"
] | [
"# code and model from https://github.com/blaueck/tf-mtcnn\nimport pkg_resources\nimport tensorflow as tf\nimport multiprocessing\nimport bob.io.image\n\n\nMODEL_PATH = pkg_resources.resource_filename(__name__, \"data/mtcnn/mtcnn.pb\")\n\n\nclass MTCNN:\n\n \"\"\"MTCNN v1 wrapper. See\n https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html for more\n details on MTCNN and see :ref:`bob.ip.tensorflow_extractor.face_detect` for an\n example code.\n\n Attributes\n ----------\n factor : float\n Factor is a trade-off between performance and speed.\n min_size : int\n Minimum face size to be detected.\n thresholds : list\n thresholds are a trade-off between false positives and missed detections.\n \"\"\"\n\n def __init__(\n self,\n min_size=40,\n factor=0.709,\n thresholds=(0.6, 0.7, 0.7),\n model_path=MODEL_PATH,\n ):\n self.min_size = min_size\n self.factor = factor\n self.thresholds = thresholds\n\n graph = tf.Graph()\n with graph.as_default():\n with open(model_path, \"rb\") as f:\n graph_def = tf.compat.v1.GraphDef.FromString(f.read())\n tf.import_graph_def(graph_def, name=\"\")\n self.graph = graph\n config = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=multiprocessing.cpu_count(),\n inter_op_parallelism_threads=multiprocessing.cpu_count(),\n )\n self.sess = tf.compat.v1.Session(graph=graph, config=config)\n\n def detect(self, img):\n \"\"\"Detects all faces in the image.\n\n Parameters\n ----------\n img : numpy.ndarray\n An RGB image in Bob format.\n\n Returns\n -------\n tuple\n A tuple of boxes, probabilities, and landmarks.\n \"\"\"\n # assuming img is Bob format and RGB\n assert img.shape[0] == 3, img.shape\n # network expects BGR opencv format\n img = bob.io.image.to_matplotlib(img)\n img = img[..., ::-1]\n feeds = {\n self.graph.get_operation_by_name(\"input\").outputs[0]: img,\n self.graph.get_operation_by_name(\"min_size\").outputs[0]: self.min_size,\n self.graph.get_operation_by_name(\"thresholds\").outputs[0]: self.thresholds,\n self.graph.get_operation_by_name(\"factor\").outputs[0]: self.factor,\n }\n fetches = [\n self.graph.get_operation_by_name(\"prob\").outputs[0],\n self.graph.get_operation_by_name(\"landmarks\").outputs[0],\n self.graph.get_operation_by_name(\"box\").outputs[0],\n ]\n prob, landmarks, box = self.sess.run(fetches, feeds)\n return box, prob, landmarks\n\n def annotations(self, img):\n \"\"\"Detects all faces in the image\n\n Parameters\n ----------\n img : numpy.ndarray\n An RGB image in Bob format.\n\n Returns\n -------\n list\n A list of annotations. Annotations are dictionaries that contain the\n following keys: ``topleft``, ``bottomright``, ``reye``, ``leye``, ``nose``,\n ``mouthright``, ``mouthleft``, and ``quality``.\n \"\"\"\n boxes, scores, landmarks = self.detect(img)\n annots = []\n for box, prob, lm in zip(boxes, scores, landmarks):\n topleft = box[0], box[1]\n bottomright = box[2], box[3]\n right_eye = lm[0], lm[5]\n left_eye = lm[1], lm[6]\n nose = lm[2], lm[7]\n mouthright = lm[3], lm[8]\n mouthleft = lm[4], lm[9]\n annots.append(\n {\n \"topleft\": topleft,\n \"bottomright\": bottomright,\n \"reye\": right_eye,\n \"leye\": left_eye,\n \"nose\": nose,\n \"mouthright\": mouthright,\n \"mouthleft\": mouthleft,\n \"quality\": prob,\n }\n )\n return annots\n\n def __call__(self, img):\n \"\"\"Wrapper for the annotations method.\n \"\"\"\n return self.annotations(img)\n"
] | [
[
"tensorflow.compat.v1.Session",
"tensorflow.Graph",
"tensorflow.import_graph_def"
]
] |
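MTCNN.detect above first converts a Bob-format RGB image (channels first) into the channels-last BGR layout the frozen graph expects. That layout change can be reproduced with plain NumPy; the sketch below uses a random 3xHxW array in place of a real image and does not need the TensorFlow model. The moveaxis step stands in for the channels-first-to-channels-last conversion done by bob.io.image.to_matplotlib.

import numpy as np

rgb_bob = np.random.randint(0, 256, size=(3, 4, 5), dtype=np.uint8)   # (C, H, W)

# channels-first RGB -> channels-last RGB ...
rgb_hwc = np.moveaxis(rgb_bob, 0, -1)
# ... then flip the channel order to get BGR for the network input
bgr_hwc = rgb_hwc[..., ::-1]

assert bgr_hwc.shape == (4, 5, 3)
assert np.array_equal(bgr_hwc[..., 0], rgb_bob[2])    # blue plane ends up first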
gkbal/PlasmaPy | [
"e000129f3c2d41e5ab77c1b1df8f1b2e9ab09fbd"
] | [
"plasmapy/plasma/sources/tests/test_plasmablob.py"
] | [
"import astropy.units as u\nimport numpy as np\nimport pytest\n\nfrom plasmapy.formulary import magnetostatics\nfrom plasmapy.particles.exceptions import InvalidParticleError\nfrom plasmapy.plasma.sources import plasma3d, plasmablob\nfrom plasmapy.utils.exceptions import CouplingWarning\n\n\[email protected](\n \"grid_dimensions, expected_size\",\n [\n ((100, 1, 1), 100), # Test 1D setup\n ((128, 128, 1), 16384), # 2D\n ((64, 64, 64), 262144), # 3D\n ],\n)\ndef test_Plasma3D_setup(grid_dimensions, expected_size):\n r\"\"\"Function to test basic setup of the Plasma3D object.\n\n Tests that a Plasma3D object initiated with a particular\n specification behaves in the correct way.\n\n Parameters\n ----------\n grid_dimensions : tuple of ints\n Grid size of the Plasma3D object to test. Must be a tuple of\n length 3, indicating length of the grid in x, y, and z\n directions respectively. Directions not needed should have a\n length of 1.\n\n expected_size : int\n Product of grid dimensions.\n\n Examples\n --------\n >>> test_Plasma3D_setup((10, 10, 10), 1000)\n >>> test_Plasma3D_setup((100, 10, 1), 1000)\n \"\"\"\n x, y, z = grid_dimensions\n test_plasma = plasma3d.Plasma3D(\n domain_x=np.linspace(0, 1, x) * u.m,\n domain_y=np.linspace(0, 1, y) * u.m,\n domain_z=np.linspace(0, 1, z) * u.m,\n )\n\n # Basic grid setup\n assert test_plasma.x.size == x\n assert test_plasma.y.size == y\n assert test_plasma.z.size == z\n assert test_plasma.grid.size == 3 * expected_size\n\n # Core variable units and shapes\n assert test_plasma.density.size == expected_size\n assert test_plasma.density.si.unit == u.kg / u.m ** 3\n\n assert test_plasma.momentum.size == 3 * expected_size\n assert test_plasma.momentum.si.unit == u.kg / (u.m ** 2 * u.s)\n\n assert test_plasma.pressure.size == expected_size\n assert test_plasma.pressure.si.unit == u.Pa\n\n assert test_plasma.magnetic_field.size == 3 * expected_size\n assert test_plasma.magnetic_field.si.unit == u.T\n\n assert test_plasma.electric_field.size == 3 * expected_size\n assert test_plasma.electric_field.si.unit == u.V / u.m\n\n\n# @pytest.mark.parametrize([()])\ndef test_Plasma3D_derived_vars():\n r\"\"\"Function to test derived variables of the Plasma3D class.\n\n Tests the shapes, units and values of variables derived from core\n variables. The core variables are set with arbitrary uniform\n values.\n \"\"\"\n test_plasma = plasma3d.Plasma3D(\n domain_x=np.linspace(0, 1, 64) * u.m,\n domain_y=np.linspace(0, 1, 64) * u.m,\n domain_z=np.linspace(0, 1, 1) * u.m,\n )\n\n # Set an arbitrary uniform values throughout the plasma\n test_plasma.density[...] = 2.0 * u.kg / u.m ** 3\n test_plasma.momentum[...] = 10.0 * u.kg / (u.m ** 2 * u.s)\n test_plasma.pressure[...] = 1 * u.Pa\n test_plasma.magnetic_field[...] = 0.01 * u.T\n test_plasma.electric_field[...] 
= 0.01 * u.V / u.m\n\n # Test derived variable units and shapes\n assert test_plasma.velocity.shape == test_plasma.momentum.shape\n assert (test_plasma.velocity == 5.0 * u.m / u.s).all()\n\n assert (\n test_plasma.magnetic_field_strength.shape\n == test_plasma.magnetic_field.shape[1:]\n )\n assert test_plasma.magnetic_field_strength.si.unit == u.T\n assert np.allclose(test_plasma.magnetic_field_strength.value, 0.017320508)\n\n assert (\n test_plasma.electric_field_strength.shape\n == test_plasma.electric_field.shape[1:]\n )\n assert test_plasma.electric_field_strength.si.unit == u.V / u.m\n\n assert test_plasma.alfven_speed.shape == test_plasma.density.shape\n assert test_plasma.alfven_speed.unit.si == u.m / u.s\n assert np.allclose(test_plasma.alfven_speed.value, 10.92548431)\n\n\[email protected]\ndef test_Plasma3D_add_magnetostatics():\n r\"\"\"Function to test add_magnetostatic function\n \"\"\"\n dipole = magnetostatics.MagneticDipole(\n np.array([0, 0, 1]) * u.A * u.m * u.m, np.array([0, 0, 0]) * u.m\n )\n cw = magnetostatics.CircularWire(\n np.array([0, 0, 1]), np.array([0, 0, 0]) * u.m, 1 * u.m, 1 * u.A\n )\n gw_cw = cw.to_GeneralWire()\n iw = magnetostatics.InfiniteStraightWire(\n np.array([0, 1, 0]), np.array([0, 0, 0]) * u.m, 1 * u.A\n )\n plasma = plasma3d.Plasma3D(\n domain_x=np.linspace(-2, 2, 30) * u.m,\n domain_y=np.linspace(0, 0, 1) * u.m,\n domain_z=np.linspace(-2, 2, 20) * u.m,\n )\n\n plasma.add_magnetostatic(dipole, cw, gw_cw, iw)\n\n\nclass Test_PlasmaBlobRegimes:\n def test_intermediate_coupling(self):\n r\"\"\"\n Method to test for coupling parameter for a plasma.\n\n Tests against expected value for coupling parameter for a\n plasma in the intermediate coupling regime.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 25 * 15e3 * u.K\n n_e = 1e26 * u.cm ** -3\n Z = 2.0\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Intermediate coupling regime: Gamma = 10.585076050938532.'\n expect_regime = f\"Intermediate coupling regime: Gamma = {blob.coupling()}.\"\n regime, _ = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n def test_strongly_coupled(self):\n r\"\"\"\n Method to test for coupling parameter for a plasma.\n\n Tests against expected value for coupling parameter for a\n plasma in the strongly coupled regime.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 5 * 15e3 * u.K\n n_e = 1e26 * u.cm ** -3\n Z = 3.0\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Strongly coupled regime: Gamma = 104.02780112828943.'\n expect_regime = f\"Strongly coupled regime: Gamma = {blob.coupling()}.\"\n\n regime, _ = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n def test_weakly_coupled(self):\n r\"\"\"\n Method to test for coupling parameter for a plasma.\n\n Tests against expected value for coupling parameter for a\n plasma in the weakly coupled regime.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 15 * 11e3 * u.K\n n_e = 1e15 * u.cm ** -3\n Z = 2.5\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, 
n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Weakly coupled regime: Gamma = 0.0075178096952688445.'\n expect_regime = f\"Weakly coupled regime: Gamma = {blob.coupling()}.\"\n\n with pytest.warns(CouplingWarning):\n regime, _ = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n def test_thermal_kinetic_energy_dominant(self):\n r\"\"\"\n Method to test for degeneracy parameter for a plasma.\n\n Tests against expected value for degeneracy parameter for a\n plasma in the thermal degenerate regime.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 10 * 11e3 * u.K\n n_e = 1e20 * u.cm ** -3\n Z = 2.5\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Thermal kinetic energy dominant: Theta = 120.65958493847927'\n expect_regime = (\n f\"Thermal kinetic energy dominant: Theta = {blob.quantum_theta()}\"\n )\n\n _, regime = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n def test_fermi_quantum_energy_dominant(self):\n r\"\"\"\n Method to test for degeneracy parameter for a plasma.\n\n Tests against expected value for degeneracy parameter for a\n plasma in the Fermi degenerate regime.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 6 * 15e3 * u.K\n n_e = 1e26 * u.cm ** -3\n Z = 3.0\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Fermi quantum energy dominant: Theta = 0.009872147858602853'\n expect_regime = f\"Fermi quantum energy dominant: Theta = {blob.quantum_theta()}\"\n\n _, regime = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n def test_both_fermi_and_thermal_energy_important(self):\n r\"\"\"\n Method to test for degeneracy parameter for a plasma.\n\n Tests against expected value for degeneracy parameter for a\n plasma whose both Fermi and thermal energy are important.\n\n The input values in this case have no special significance\n and are just to get the desired output.\n \"\"\"\n\n T_e = 5 * 15e3 * u.K\n n_e = 1e25 * u.cm ** -3\n Z = 2.0\n particle = \"p\"\n blob = plasmablob.PlasmaBlob(T_e=T_e, n_e=n_e, Z=Z, particle=particle)\n\n # expect_regime = 'Both Fermi and thermal energy important: Theta = 0.03818537605355442'\n expect_regime = (\n f\"Both Fermi and thermal energy important: Theta = {blob.quantum_theta()}\"\n )\n\n _, regime = blob.regimes()\n testTrue = regime == expect_regime\n\n errStr = f\"Regime should be {expect_regime}, but got {regime} instead.\"\n assert testTrue, errStr\n\n\nclass Test_PlasmaBlob:\n @classmethod\n def setup_class(self):\n \"\"\"initializing parameters for tests \"\"\"\n self.T_e = 5 * 11e3 * u.K\n self.n_e = 1e23 * u.cm ** -3\n self.Z = 2.5\n self.particle = \"p\"\n self.blob = plasmablob.PlasmaBlob(\n T_e=self.T_e, n_e=self.n_e, Z=self.Z, particle=self.particle\n )\n self.couplingVal = 10.468374460435724\n self.thetaVal = 0.6032979246923964\n\n def test_invalid_particle(self):\n \"\"\"\n Checks if function raises error for invalid particle.\n \"\"\"\n with pytest.raises(InvalidParticleError):\n plasmablob.PlasmaBlob(\n T_e=self.T_e, 
n_e=self.n_e, Z=self.Z, particle=\"cupcakes\"\n )\n\n def test_electron_temperature(self):\n \"\"\"Testing if we get the same electron temperature we put in \"\"\"\n testTrue = self.T_e == self.blob.electron_temperature\n errStr = (\n f\"Input electron temperature {self.T_e} should be equal to \"\n f\"electron temperature of class \"\n f\"{self.blob.electron_temperature}.\"\n )\n assert testTrue, errStr\n\n def test_electron_density(self):\n \"\"\"Testing if we get the same electron density we put in \"\"\"\n testTrue = self.n_e == self.blob.electron_density\n errStr = (\n f\"Input electron density {self.n_e} should be equal to \"\n f\"electron density of class \"\n f\"{self.blob.electron_density}.\"\n )\n assert testTrue, errStr\n\n def test_ionization(self):\n \"\"\"Testing if we get the same ionization we put in \"\"\"\n testTrue = self.Z == self.blob.ionization\n errStr = (\n f\"Input ionization {self.Z} should be equal to \"\n f\"ionization of class \"\n f\"{self.blob.ionization}.\"\n )\n assert testTrue, errStr\n\n def test_composition(self):\n \"\"\"Testing if we get the same composition (particle) we put in \"\"\"\n testTrue = self.particle == self.blob.composition\n errStr = (\n f\"Input particle {self.particle} should be equal to \"\n f\"composition of class \"\n f\"{self.blob.composition}.\"\n )\n assert testTrue, errStr\n\n def test_coupling(self):\n \"\"\"\n Tests if coupling method value meets expected value.\n \"\"\"\n methodVal = self.blob.coupling()\n errStr = (\n f\"Coupling parameter should be {self.couplingVal} \"\n f\"and not {methodVal.si.value}.\"\n )\n testTrue = np.isclose(methodVal.value, self.couplingVal, rtol=1e-6, atol=0.0)\n assert testTrue, errStr\n\n def test_quantum_theta(self):\n \"\"\"\n Tests if degeneracy parameter method value meets expected value.\n \"\"\"\n methodVal = self.blob.quantum_theta()\n errStr = (\n f\"Degeneracy parameter should be {self.thetaVal} \"\n f\"and not {methodVal.si.value}.\"\n )\n testTrue = np.isclose(methodVal.value, self.thetaVal, rtol=1e-6, atol=0.0)\n assert testTrue, errStr\n"
] | [
[
"numpy.allclose",
"numpy.array",
"numpy.isclose",
"numpy.linspace"
]
] |
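The tests above exercise PlasmaBlob.coupling(), quantum_theta(), and regimes(). A minimal usage sketch mirroring the fixture values from Test_PlasmaBlob, assuming PlasmaPy and Astropy are installed and expose the same API as the version under test; the temperature and density values have no special significance.

import astropy.units as u
from plasmapy.plasma.sources import plasmablob

blob = plasmablob.PlasmaBlob(
    T_e=5 * 11e3 * u.K,          # electron temperature
    n_e=1e23 * u.cm ** -3,       # electron density
    Z=2.5,                       # ionization
    particle="p",
)

print(blob.coupling())           # coupling parameter Gamma
print(blob.quantum_theta())      # degeneracy parameter Theta
print(blob.regimes())            # human-readable regime strings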
ohsu6072/quantarhei | [
"713dc77e0b99a8edca0989e0e3fe2d102516d486"
] | [
"examples/demo_018_ModifiedRedfieldTheory_1.py"
] | [
"# -*- coding: utf-8 -*-\n\n#\n# Demo settings\n#\n_show_plots_ = False\n\n\nimport numpy\n\nimport quantarhei as qr\nfrom quantarhei.models.modelgenerator import ModelGenerator\n\nprint(\"\"\"\n*******************************************************************************\n* *\n* Modified Redfield Theory Demo *\n* * \n*******************************************************************************\n\"\"\")\n\nNt = 1000\ndt = 1.0\ntime = qr.TimeAxis(0.0, Nt, dt)\n\n\nmg = ModelGenerator()\nagg = mg.get_Aggregate_with_environment(name=\"pentamer-1_env\",\n timeaxis=time)\n\n\nagg.build()\n\n\nsbi = agg.get_SystemBathInteraction()\nham = agg.get_Hamiltonian()\nham.set_name(\"Hamiltonian\")\n\nprint(\">>> Hamiltonian \")\nwith qr.energy_units(\"1/cm\"):\n print(ham)\n\nprint(\"\"\"\n*******************************************************************************\n\n Calculating relaxation tensor\n \n*******************************************************************************\n\"\"\")\n\nm = qr.Manager()\nm.warn_about_basis_change = False \n\nsb_reference = qr.BasisReferenceOperator(ham.dim,\n name=\"site basis reference\")\n\n#\n# Calculation of various relaxation tensors\n#\n\nham.protect_basis()\nwith qr.eigenbasis_of(ham):\n \n #RRT = qr.qm.RedfieldRelaxationTensor(ham, sbi, name=\"Tensor 1\")\n #\n #print(\"\\nRelaxation times from the full relaxation tensor\")\n #for i in range(1, ham.dim):\n # for j in range(1, ham.dim):\n # print(i, \"<-\", j, \":\", 1.0/numpy.real(RRT.data[i,i,j,j]))\n \n print(\"\\nCalculating relaxation rates\")\n \n try: \n RRM = qr.qm.ModifiedRedfieldRateMatrix(ham, sbi)\n print(\"\\nRelaxation times from the rate matrix\")\n \n for i in range(1,ham.dim):\n for j in range(1, ham.dim):\n print(i, \"<-\", j, \":\", 1.0/RRM.data[i,j])\n\n except:\n pass\n \n #print(\"\\nComparison of the results: ratio of rates\")\n #for i in range(1, ham.dim):\n # for j in range(1, ham.dim):\n # print(i, \"<-\", j, \":\", RRM.data[i,j]/numpy.real(RRT.data[i,i,j,j]))\n\n #TDRRM = qr.qm.TDRedfieldRateMatrix(ham, sbi)\n #print(\"\\nRelaxation times from the rate matrix\")\n #for i in range(1,ham.dim):\n # for j in range(1, ham.dim):\n # print(i, \"<-\", j, \":\", 1.0/TDRRM.data[time.length-1,i,j])\n\nham.unprotect_basis()\n\nif False:\n with qr.eigenbasis_of(ham):\n \n #\n # Evolution of reduced density matrix\n #\n \n prop = qr.ReducedDensityMatrixPropagator(time, ham, RRT)\n \n rho_i = qr.ReducedDensityMatrix(dim=ham.dim, name=\"Initial DM\")\n rho_i.data[3,3] = 1.0\n \n # FIXME: unprotecting does not work correctly\n #RRT.unprotect_basis()\n \n with qr.eigenbasis_of(sb_reference):\n print(\" Relaxation time site basis: \", 1.0/RRT.data[1,1,2,2])\n \n RRT.secularize()\n print(\" Relaxation time exciton basis: \", 1.0/RRT.data[1,1,2,2])\n rho_t = prop.propagate(rho_i, name=\"Redfield evolution\")\n \n if _show_plots_:\n rho_t.plot(coherences=False)\n \n rho_i1 = qr.ReducedDensityMatrix(dim=ham.dim, name=\"Initial DM\")\n rho_i1.data[3,3] = 1.0 \n \n \n #rho_t.plot(coherences=False)\n \n \n \n #\n # Evolution of populations\n #\n \n prop = qr.PopulationPropagator(time, RRM)\n p0 = [i for i in range(ham.dim)]\n p0[3] = 1.0\n pop_t = prop.propagate(p0)\n \n if _show_plots_:\n import matplotlib.pyplot as plt\n plt.plot(time.data, pop_t[:,3],'--r')\n plt.show()\n \n #print(RRM.data[2,3])\n #with eigenbasis_of(ham):\n # print(RRT.data[2,2,3,3])\n \n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
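The quantarhei demo above prints relaxation times as reciprocals of the Modified Redfield rates and then propagates populations with those rates. The sketch below is not a quantarhei call; it is a generic NumPy/SciPy illustration of propagating populations with a made-up 2x2 rate matrix via the matrix exponential, which is the textbook counterpart of that propagation step.

import numpy as np
from scipy.linalg import expm

# Made-up downhill/uphill rates (1/fs); K[i, j] is the j -> i rate for i != j
k_down, k_up = 1.0 / 100.0, 1.0 / 400.0
K = np.array([[-k_up,  k_down],
              [ k_up, -k_down]])        # columns sum to zero: population is conserved

p0 = np.array([0.0, 1.0])               # start fully in the upper state
for t_fs in (0.0, 50.0, 200.0, 1000.0):
    p_t = expm(K * t_fs) @ p0
    print(f"t = {t_fs:6.1f} fs  populations = {p_t}")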
rowanshah/Qiskit-Quantum-Approach-To-Rendezvous-Probelem | [
"3d6f08d7e2d05146a70f7a29a28d4878a565c782"
] | [
"Solution/Plots/PlotAccuracy.py"
] | [
"#%%\n#Accuracy Test script\nimport numpy as np\nfrom qiskit import IBMQ, BasicAer, Aer\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute\nfrom qiskit.providers.ibmq import least_busy\nimport matplotlib.pyplot as plt\ndef oracle(maze, alice_room, bob_room, q_oracle, q_oracle_ancilla):\n n = int(alice_room.size/2)\n for i in range(n):\n maze.cx(alice_room[i], bob_room[i])\n maze.x(bob_room[i])\n maze.x(bob_room[i+1])\n maze.cx(alice_room[i+1], bob_room[i+1])\n maze.ccx(bob_room[i], bob_room[i+1], q_oracle[i])\n maze.barrier()\n maze.cx(q_oracle, q_oracle_ancilla)\n maze.barrier() # f inverse\n for i in range(n):\n maze.ccx(bob_room[i], bob_room[i+1], q_oracle[i])\n maze.cx(alice_room[i+1], bob_room[i+1])\n maze.x(bob_room[i])\n maze.cx(alice_room[i], bob_room[i])\n maze.x(bob_room[i+1])\n maze.barrier()\ndef phase_shift(maze, rooms):\n n = int(rooms.size / 2)\n maze.h(rooms)\n maze.x(rooms)\n for i in range(n):\n maze.h(rooms[i + 1])\n maze.cx(rooms[i], rooms[i + 1])\n maze.h(rooms[i + 1])\n maze.x(rooms)\n maze.h(rooms)\n maze.barrier()\ndef state_preparation(n):\n n_oracle = int(n/2)\n alice_room = QuantumRegister(n)\n bob_room = QuantumRegister(n)\n maze = QuantumCircuit(alice_room)\n maze.add_register(bob_room)\n maze.h(alice_room)\n maze.h(bob_room)\n q_oracle= QuantumRegister(n_oracle)\n q_oracle_ancilla = QuantumRegister(n_oracle)\n c_alice = ClassicalRegister(n)\n c_bob = ClassicalRegister(n)\n maze.add_register(q_oracle)\n maze.add_register(q_oracle_ancilla)\n maze.add_register(c_alice)\n maze.add_register(c_bob)\n maze.x(q_oracle_ancilla)\n maze.h(q_oracle_ancilla)\n maze.barrier()\n maze.draw(output='mpl', filename='state_preparation_n')\n return maze, alice_room, bob_room, q_oracle, q_oracle_ancilla, c_alice, c_bob\ndef rendezvous(n_rooms, n_iteration, n_shots):\n maze, alice_room, bob_room, q_oracle, q_oracle_ancilla, c_alice, c_bob = state_preparation(n_rooms)\n for i in range(n_iteration):\n oracle(maze, alice_room, bob_room, q_oracle, q_oracle_ancilla)\n phase_shift(maze, bob_room)\n oracle(maze, alice_room, bob_room, q_oracle, q_oracle_ancilla)\n phase_shift(maze, alice_room)\n maze.measure(alice_room, c_alice)\n maze.measure(bob_room, c_bob)\n # Load on real device\n # IBMQ.load_account()\n # provider = IBMQ.get_provider(hub='ibm-q')\n # backend = least_busy(provider.backends(simulator=True))\n # counts = execute(maze, backend).result().get_counts()\n counts = execute(maze, Aer.get_backend('qasm_simulator'), shots=n_shots). 
\\\n result().get_counts()\n winners = [counts.get(k) for k in counts.keys() if k[:n_rooms] == k[n_rooms + 1:n_rooms * 2 + 1]]\n accuracy = sum(winners) / n_shots\n print(counts)\n print(\"done!\")\n print(\"number of iterations: \", n_iteration)\n print(\"accuracy: \", accuracy)\n maze.draw(output='mpl', filename='alice-bob_n')\n return counts, accuracy\ndef plot_counts(counts, n_rooms, n_iteration):\n count_value = [counts.get(k) for k in counts.keys()]\n count_label = [k for k in counts.keys()]\n x = np.arange(len(counts))\n plt.bar(x, height=count_value)\n plt.xticks(x, count_label)\n plt.title('Number of rooms for each pawn: ' + str(n_rooms) +\n ', Number of iterations: ' + str(n_iteration))\n\ndef plot_accuracy(n_range, n_shots): \n iter_list = []\n accuracy_list = []\n for j in range(1, n_range):\n n_iteration = j\n n_rooms = 2\n n_shots = n_shots\n counts, accuracy = rendezvous(n_rooms, n_iteration, n_shots)\n iter_list.append(j)\n accuracy_list.append(accuracy)\n #plot_accuracy(counts, n_rooms, n_iteration)\n #plot_counts(counts, n_rooms, n_iteration)\n #counts_sorted = sorted(counts.values())\n #counts_sorted.reve rse()\n\n# accuracy vs no of iterations\n x = np.arange(len(iter_list))\n plt.bar(x, height=accuracy_list)\n plt.xticks(x, iter_list)\n plt.title('Success Probability') \n\nplot_accuracy(21, 1024)\n#%%\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
] |
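The plotting helpers in the rendezvous script above only rely on np.arange, plt.bar, plt.xticks, and plt.title, as listed in its apis column. A stripped-down, runnable sketch of that accuracy bar chart with made-up numbers (no Qiskit required):

import numpy as np
import matplotlib.pyplot as plt

iterations = list(range(1, 6))
accuracies = [0.47, 0.81, 0.95, 0.88, 0.63]   # made-up values for illustration

x = np.arange(len(iterations))
plt.bar(x, height=accuracies)
plt.xticks(x, iterations)
plt.title("Success Probability")
plt.show()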
hbrunie/cctbx_project | [
"2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5"
] | [
"xfel/clustering/singleframe.py"
] | [
"\"\"\" Module for working with single images in a serial crystallography\ndataset\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom libtbx import easy_pickle\nimport numpy as np\nimport math\nimport logging\nfrom cctbx.array_family import flex\nfrom six.moves import cPickle as pickle\nfrom .api import InputFrame\nfrom six.moves import zip\nlogger = logging.getLogger('sf')\n\nclass SingleFrame(InputFrame):\n \"\"\" Class that creates single-image agregate metrics/scoring that can then be\n used in downstream clustering or filtering procedures.\n \"\"\"\n ANGSTROMS_TO_EV = 12398.425\n\n def __init__(self, path=None, filename=None, crystal_num=0,\n remove_negative=False, use_b=True, scale=True, dicti=None,\n pixel_size=None):\n \"\"\"\n Constructor for SingleFrame object, using a cctbx.xfel integration pickle.\n\n :param path: path to integration pickle\n :param filename: the file name alone (used as a label)\n :param crystal_num: if multiple lattices present, the latice number.\n :param remove_negative: Boolean for removal of negative intensities\n :param use_b: if True, initialise scale and B, if false, use only mean-intensity scaling.\n :param dicti: optional. If a dictionairy is supplied here, will create object from that rather than attempting to read the file specified in path, filename.\n :param pixel_size: the size of pixels in mm. Defaults to a MAR detector with a warning at debug level of logging.\n :param scale: if False, will intialise scales to G=1, B=0.\n\n\n :return: a SingleFrame object, with the following Object attributes:\n\n\n Object attributes are:\n - `is_polarization_corrected`: Boolean flag indicatinf if polarization correction has been applied\n - `miller_array`: the cctbx.miller miller array of spot intensities.\n - `mapped_predictions`: the mapped_predictions locations\n - `path`: full path to the original file\n - `name`: file-name, used as an identifier\n - `crystal_system:\n - `pg`: point group of pickle\n - `uc`: Niggli unit cell as a tuple\n - `orientation`: cctbx crystal_orientation object\n - `total_i`: the total integrated intensity for this frame\n - `xbeam`: x-location of beam centre\n - `ybeam`: y-location of beam centre\n - `wavelength:\n - `spot_offset`: the mean offset between observed spots and predicted centroids. Only created if integration was performed using verbose_cv=True. Otherwise None.\n - `minus_2B`: the gradient of the ln(i) vs. sinsqtheta_over_lambda_sq plot\n - `G`: intercept of the of the ln(i) vs. sinsqtheta_over_lambda_sq plot\n - `log_i`: list of log_i intensities\n - `sinsqtheta_over_lambda_sq`: list of sinsqtheta_over_lambda_sq\n - `wilson_err`: standard error on the fit of ln(i) vs. sinsqtheta_over_lambda_sq\n - `miller_fullies`: a cctbx.miller array of fully recorded intensites.\n \"\"\"\n if dicti is not None:\n d = dicti\n else:\n try:\n d = easy_pickle.load(path)\n except (pickle.UnpicklingError, ValueError, EOFError, IOError):\n d = {}\n logger.warning(\"Could not read %s. It may not be a pickle file.\" % path)\n if 'observations' not in d or len(d['observations'][crystal_num].data()) == 0:\n return\n try:\n if pixel_size:\n self.pixel_size = pixel_size\n else:\n logger.debug(\"No pixel size specified, defaulting to MAR (0.079346). 
\"\n \"Bad times if this is not the correct detector!\")\n self.pixel_size = 0.079346\n # Warn on error, but continue directory traversal.\n self.is_polarization_corrected = False\n # Miller arrays\n self.miller_array = d['observations'][crystal_num]\n self.mapped_predictions = d['mapped_predictions'][crystal_num]\n # Image pickle info\n self.path = path or d['path']\n self.name = filename\n # Unit cell info\n self.crystal_system = self.miller_array.crystal_symmetry()\\\n .space_group().crystal_system()\n self.pg = d['pointgroup'].replace(' ', '') # enforce consistency\n # XXX major bug here??? niggli cell not produced with knowledge of the centring symbol???\n self.uc = d['current_orientation'][crystal_num].unit_cell() \\\n .niggli_cell() \\\n .parameters()\n self.orientation = d['current_orientation'][crystal_num]\n # Agregate info\n self.total_i = d['observations'][crystal_num].sum()\n self.xbeam = d['xbeam']\n self.ybeam = d['ybeam']\n self.wavelength = d['wavelength']\n self.distance = d['distance']\n if 'correction_vectors' in d:\n all_corrections = []\n for spot in d['correction_vectors'][crystal_num]:\n dta = np.sqrt((spot['refinedcenter'][0] - spot['obscenter'][0]) ** 2\n + (spot['refinedcenter'][1] - spot['obscenter'][1]) ** 2)\n all_corrections.append(dta)\n self.spot_offset = np.mean(all_corrections)\n else:\n self.spot_offset = None\n\n if remove_negative:\n self.filter_negative_intensities()\n\n # Do polarization correction\n self.polarization_correction()\n self.minus_2B, self.G, self.log_i, \\\n self.sinsqtheta_over_lambda_sq, \\\n self.wilson_err = self.init_calc_wilson(use_b)\n if not scale:\n self.minus_2B = 0\n self.G = 1\n if logger.root.level < logging.DEBUG: # Extreme debug!\n self.plot_wilson()\n logger.debug(\"Extracted image {}\".format(filename))\n except KeyError:\n logger.warning(\"Could not extract point group and unit cell from %s\" % path)\n\n self.miller_fullies = None\n\n def trim_res_limit(self, d_min=None, d_max=None):\n \"\"\"\n Remove all miller indicies outside the range of _d_min, _d_max.\n Changes the object in place.\n\n :param d_min: min res of new miller array. Defaults to current value.\n :param d_max: max res of new miller array. Defaults to current value.\n \"\"\"\n if d_min is None:\n d_min = self.miller_array.d_min()\n if d_max is None:\n d_max = self.miller_array.d_max_min()[0]\n self.miller_array = self.miller_array.resolution_filter(d_max, d_min).sort()\n\n def filter_negative_intensities(self):\n \"\"\"\n Filters negative intensities from the Miller array. Acts in place.\n :return: acts in place.\n \"\"\"\n i_I_positive = (self.miller_array.data() > 0)\n self.miller_array = self.miller_array.select(i_I_positive).sort()\n self.mapped_predictions = self.mapped_predictions.select(i_I_positive)\n\n def n_reflections_by_sigi(self, sig_i_cuttoff):\n \"\"\"\n Currently a placeholder that returns None.\n\n This method should return the number of reflection in the frame that have an\n I/sig(I) > sig_i_cuttoff\n \"\"\"\n reflections_above_cuttoff = None\n return len(reflections_above_cuttoff)\n\n def init_calc_wilson(self, use_b_factor, i_corrections=None):\n \"\"\" If use_b_factor is\n :param i_corrections: allows flex array of correction factors (e.g. partialities) to be specified\n :param use_b_factor: if True, do a linear regression to fit G and B and returns the coeficients minus_2B, G, the transformed data log_i, and one_over_d_sqare. Also returns fit_stats, which is a dictionairy. 
If use_b_factor is False, then B is 0, and G is the mean intensity of the image. The r_value is then 0 (by definition), and the std_err is the standard error on the mean.\n\n :return minus_2B, G, log_i, on_over_d_square: `minus_2B`: gradient of fit; `G`: intercept of fit; `log_i`: dependent variable of fit; `one_over_d_square`: independent variable of fit.\n \"\"\"\n if i_corrections:\n inten = (self.miller_array.sort().data() * i_corrections).as_numpy_array()\n else:\n inten = self.miller_array.sort().data().as_numpy_array()\n sinsqtheta_over_labmdasq = self.miller_array.sort()\\\n .sin_theta_over_lambda_sq().data().as_numpy_array()\n\n # then plot them as negative in the linear fit.\n inten, sinsqtheta_over_labmdasq = zip(*[i for i\n in zip(inten,\n sinsqtheta_over_labmdasq)\n if i[0] >= 0])\n\n if use_b_factor:\n from scipy.stats import linregress\n minus_2B, G, r_val, _, std_err = linregress(sinsqtheta_over_labmdasq,\n np.log(inten))\n else:\n # If the model is a constant value, r_val = 0, and\n from scipy.stats import sem\n minus_2B, G, r_val, std_err = 0, np.mean(inten), 0, sem(inten)\n\n # ignore p_val since this will be insanely small\n logger.debug(\"G: {}, -2B: {}, r: {}, std_err: {}\".\n format(G, minus_2B, r_val, std_err))\n return minus_2B, G, np.log(inten), sinsqtheta_over_labmdasq, {\"R\": r_val,\n \"Standard Error\": std_err}\n\n def plot_wilson(self, width=30, ax=None):\n \"\"\" Makes a log(I) vs 1/d**2 plot, displaying the raw partial data, a\n rolling average of the data, and the Wilson model fit to the data.\n\n :param: width: smoothing window size\n :param: ax: optional axes object to ve used for plotting\n \"\"\"\n\n import matplotlib.pyplot as plt\n if ax is None:\n fig = plt.figure()\n ax = fig.gca()\n direct_visualisation = True\n else:\n direct_visualisation = False\n\n smooth = self._moving_average(self.log_i, n=width)\n ax.plot(self.sinsqtheta_over_lambda_sq[width - 1:], smooth,\n '--r', lw=3)\n ax.plot(self.sinsqtheta_over_lambda_sq, self.log_i, 'bo', ms=2)\n ax.plot([0, -1 * self.G / self.minus_2B], [self.G, 0], 'y-', lw=2)\n plt.xlim(0, max(self.sinsqtheta_over_lambda_sq))\n plt.xlabel(\"(sin(theta)/lambda)^2\")\n plt.ylabel(\"ln(I)\")\n plt.title(\"Single frame Wilson fit\\n{}\\nG: {}, B: {}, r: {}, std_err: {}\".\n format(self.name, self.G, -1 * self.minus_2B / 2,\n self.wilson_err['R'], self.wilson_err['Standard Error']))\n\n if direct_visualisation:\n plt.show()\n return ax\n\n \"\"\" Spline method removed because it will be v.slow\n from scipy.interpolate import UnivariateSpline as Spline\n from numpy import linspace\n xs = linspace(min(self.one_over_d_square), max(self.one_over_d_square), 100)\n spl = Spline(self.one_over_d_square, self.log_i, s=10000)\n ys = spl(xs)\n plt.plot(xs, ys, '--g', lw=3)\n \"\"\"\n \"\"\" idiomatic CCTBX method removed because I want more fine-grained detail\n _d_star_p = 1.618034 # Golden ratio distribution for d-spacings\n binner = self.miller_array.setup_binner(n_bins=nbins)\n #logger.debug(str(\"{}\".format(binner.show_summary())))\n bin_selections = [binner.selection(i) for i in binner.range_used()]\n means = [self.miller_array.select(sel).mean() for sel in bin_selections]\n log_means = [math.log(mil) if mil > 0 else 0 for mil in means]\n centers = binner.bin_centers(_d_star_p)\n d_centers = centers ** (-1 / _d_star_p)\n plt.plot(1/(d_centers**2), log_means)\n plt.show()\n \"\"\"\n\n def polarization_correction(self):\n \"\"\" Perform basic polarization correction in place, and change the\n is_polarization_corrected flag 
to True.\n\n I_corrected = 2*I_uncorrected/(1 + cos(two_theta)**2)\n \"\"\"\n two_theta = self.miller_array.two_theta(wavelength=self.wavelength).data()\n one_over_P = 2/(1 + (flex.cos(two_theta) ** 2))\n self.miller_array = self.miller_array.customized_copy(\n data=self.miller_array.data() * one_over_P)\n self.is_polarization_corrected = True\n\n def distance_from(self, other_uc):\n \"\"\"\n Calculates distance using NCDist from Andrews and Bernstein J. Appl.\n Cryst. 2014 between this frame and some other unit cell.\n :param:other_uc: a 6-tuple of a, b, c, alpha, beta, gamma for some unit cell\n :return: the NCDist in A^2 to other_uc\n \"\"\"\n from cctbx.uctbx.determine_unit_cell import NCDist\n self_g6 = self.make_g6(self.uc)\n other_g6 = self.make_g6(other_uc)\n return NCDist(self_g6, other_g6)\n\n def to_panda(self):\n \"\"\" Returns the object attributes as a pandas series \"\"\"\n import pandas as pd\n return pd.Series({'path': self.path,\n 'name': self.name,\n 'crystal_system': self.crystal_system,\n 'point group': self.pg,\n 'a': self.uc[0],\n 'b': self.uc[1],\n 'c': self.uc[2],\n 'alpha': self.uc[3],\n 'beta': self.uc[4],\n 'gamma': self.uc[5],\n 'total_i': self.total_i,\n 'wavelength': self.wavelength,\n 'spot_offset': self.spot_offset,\n 'minus_2B': self.minus_2B,\n 'G': self.G,\n 'willson_err': self.wilson_err})\n\n\n @staticmethod\n def _moving_average(array, n=50):\n \"\"\" quick method for moving average, needed for smoothing plots. Implements\n a summer area table approach.\"\"\"\n tmp = np.cumsum(array, dtype=float)\n tmp[n:] = tmp[n:] - tmp[:-n]\n return tmp[n - 1:] / n\n\n @staticmethod\n def make_g6(uc):\n \"\"\" Take a reduced Niggli Cell, and turn it into the G6 representation \"\"\"\n a = uc[0] ** 2\n b = uc[1] ** 2\n c = uc[2] ** 2\n d = 2 * uc[1] * uc[2] * math.cos(uc[3])\n e = 2 * uc[0] * uc[2] * math.cos(uc[4])\n f = 2 * uc[0] * uc[1] * math.cos(uc[5])\n return [a, b, c, d, e, f]\n\nclass SingleDialsFrame(SingleFrame):\n def __init__(self, refl=None, expt=None, id=None, **kwargs):\n from xfel.command_line.frame_extractor import ConstructFrame\n frame = ConstructFrame(refl, expt).make_frame()\n SingleFrame.__init__(self, dicti=frame, path=str(id), **kwargs)\n self.experiment = expt\n self.reflections = refl\n\nclass SingleDialsFrameFromFiles(SingleFrame):\n def __init__(self, refls_path=None, expts_path=None, **kwargs):\n from xfel.command_line.frame_extractor import ConstructFrameFromFiles\n frame = ConstructFrameFromFiles(refls_path, expts_path).make_frame()\n SingleFrame.__init__(self, dicti=frame, path=\" \".join((refls_path, expts_path)), **kwargs)\n\nclass CellOnlyFrame(SingleFrame):\n def __init__(self, crystal_symmetry, path=None, name=None, lattice_id=None):\n from six.moves import cStringIO as StringIO\n f = StringIO()\n self.crystal_symmetry = crystal_symmetry\n self.crystal_symmetry.show_summary(f=f)\n self.niggli_cell = self.crystal_symmetry.niggli_cell()\n self.niggli_cell.show_summary(f=f, prefix=\" niggli-->\")\n logger.info(f.getvalue())\n self.uc = self.niggli_cell.unit_cell().parameters()\n self.mm = self.niggli_cell.unit_cell().metrical_matrix()\n self.pg = \"\".join(self.crystal_symmetry.space_group().type().lookup_symbol().split())\n self.path = path\n self.name = name\n self.lattice_id = lattice_id\n\nclass SingleDialsFrameFromJson(SingleFrame):\n def __init__(self, expts_path=None, **kwargs):\n from dials.util.options import Importer, flatten_experiments\n importer = Importer([expts_path], read_experiments=True, read_reflections=False, 
check_format=False)\n if importer.unhandled:\n # in python 2: raise Exception(\"unable to process:\"), importer.unhandled\n raise Exception(\"unable to process:\")\n experiments_l = flatten_experiments(importer.experiments)\n assert len(experiments_l)==1, \"Sorry, only supports one experiment per json at present.\"\n tcrystal = experiments_l[0].crystal\n from cctbx import crystal\n group = tcrystal.get_space_group()\n self.crystal_symmetry = crystal.symmetry(unit_cell=tcrystal.get_unit_cell(),\n space_group=group)\n self.crystal_symmetry.show_summary()\n self.niggli_cell = self.crystal_symmetry.niggli_cell()\n self.niggli_cell.show_summary(prefix=\" niggli-->\")\n self.uc = self.niggli_cell.unit_cell().parameters()\n self.mm = self.niggli_cell.unit_cell().metrical_matrix()\n self.pg = \"\".join(group.type().lookup_symbol().split())\n self.path = expts_path\n"
] | [
[
"numpy.log",
"matplotlib.pyplot.xlabel",
"numpy.mean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.cumsum",
"scipy.stats.sem",
"matplotlib.pyplot.show",
"numpy.sqrt",
"pandas.Series"
]
] |
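The `init_calc_wilson` method in the `SingleFrame` code above recovers a per-frame scale `G` and Wilson `B` factor by a linear regression of ln(I) against sin²(θ)/λ². Below is a minimal, self-contained sketch of that fit on synthetic intensities (the data, the true G/B values and the noise level are made up for illustration; the real code pulls both arrays from a cctbx miller array):

```python
import numpy as np
from scipy.stats import linregress

# Synthetic log-intensities following ln(I) = G - 2B * sin^2(theta)/lambda^2
rng = np.random.default_rng(0)
sinsq_over_lsq = rng.uniform(0.0, 0.25, size=500)   # sin^2(theta)/lambda^2
G_true, B_true = 7.0, 20.0
log_i = G_true - 2.0 * B_true * sinsq_over_lsq + rng.normal(0.0, 0.3, size=500)

# Linear fit, mirroring the use_b_factor branch of init_calc_wilson:
# slope is -2B, intercept is G
minus_2B, G, r_val, _, std_err = linregress(sinsq_over_lsq, log_i)
print("G ~", G, " B ~", -minus_2B / 2.0, " r =", r_val, " std_err =", std_err)
```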
SeanNaren/fairscale | [
"2d3d5a7bb7340963383afd5b4e9a0b53e1238c35"
] | [
"benchmarks/experimental/benchmark_dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport torch\nfrom torch.utils.data import Dataset\n\n# TODO(sidgoyal): Refactor benchmarks to remove this file eventually.\n\n\ndef collate_sentences_lm(samples):\n\n if len(samples) == 0:\n return {}\n\n id = torch.LongTensor([s[\"id\"] for s in samples])\n src_tokens = torch.stack([s[\"source\"] for s in samples], 0)\n tgt_tokens = torch.stack([s[\"target\"] for s in samples], 0)\n ntokens = len(samples) * len(samples[0][\"target\"])\n src_lengths = torch.LongTensor([len(samples[0][\"source\"])] * len(samples))\n\n batch = {\n \"id\": id,\n \"nsentences\": len(samples),\n \"ntokens\": ntokens,\n \"input\": src_tokens,\n \"target\": tgt_tokens,\n }\n return batch\n\n\nclass BenchmarkLMDataset(Dataset):\n \"\"\"\n Dataset to benchmark a translation like seq2seq task.\n Args:\n vocab_size (int, optional): size of the vocabulary (default 10000).\n max_source_positions (int, optional): max number of tokens in the\n source sentence (default: 1024).\n total_samples (int, optional): the total number of rows in the\n dataset (default: 10000).\n \"\"\"\n\n def __init__(\n self, vocab_size=10000, max_source_positions=1024, total_samples=10000,\n ):\n self.vocab_size = vocab_size\n self.max_source_positions = max_source_positions\n self.total_samples = total_samples\n self.sizes = [self.max_source_positions] * self.total_samples\n\n def __getitem__(self, index):\n length = self.sizes[index]\n source = torch.randint(1, self.vocab_size, (length,))\n target = source.clone()\n return {\n \"id\": index,\n \"source\": source,\n \"target\": target,\n }\n\n def __len__(self):\n return self.total_samples\n"
] | [
[
"torch.randint",
"torch.LongTensor",
"torch.stack"
]
] |
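`BenchmarkLMDataset` and `collate_sentences_lm` above are written to be plugged into `torch.utils.data.DataLoader`. A rough, self-contained usage sketch follows; `ToyLMDataset` and `collate` are cut-down stand-ins defined here for illustration rather than imports from the benchmark module:

```python
import torch
from torch.utils.data import DataLoader, Dataset

# Tiny stand-in mirroring BenchmarkLMDataset from the file above
class ToyLMDataset(Dataset):
    def __init__(self, vocab_size=100, seq_len=16, total=64):
        self.vocab_size, self.seq_len, self.total = vocab_size, seq_len, total

    def __len__(self):
        return self.total

    def __getitem__(self, index):
        source = torch.randint(1, self.vocab_size, (self.seq_len,))
        return {"id": index, "source": source, "target": source.clone()}

# Stand-in mirroring collate_sentences_lm: stacks per-sample tensors into a batch dict
def collate(samples):
    return {
        "id": torch.LongTensor([s["id"] for s in samples]),
        "input": torch.stack([s["source"] for s in samples], 0),
        "target": torch.stack([s["target"] for s in samples], 0),
        "nsentences": len(samples),
        "ntokens": len(samples) * len(samples[0]["target"]),
    }

loader = DataLoader(ToyLMDataset(), batch_size=8, collate_fn=collate)
batch = next(iter(loader))
print(batch["input"].shape, batch["ntokens"])  # torch.Size([8, 16]) 128
```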
lefatoum2/opencv | [
"f7cab121fe2954c67b343b3b7805e1c092812093"
] | [
"samples/dnn/dasiamrpn_tracker.py"
] | [
"\"\"\"\nDaSiamRPN tracker.\nOriginal paper: https://arxiv.org/abs/1808.06048\nLink to original repo: https://github.com/foolwood/DaSiamRPN\nLinks to onnx models:\nnetwork: https://www.dropbox.com/s/rr1lk9355vzolqv/dasiamrpn_model.onnx?dl=0\nkernel_r1: https://www.dropbox.com/s/999cqx5zrfi7w4p/dasiamrpn_kernel_r1.onnx?dl=0\nkernel_cls1: https://www.dropbox.com/s/qvmtszx5h339a0w/dasiamrpn_kernel_cls1.onnx?dl=0\n\"\"\"\n\nimport numpy as np\nimport cv2 as cv\nimport argparse\nimport sys\n\nclass DaSiamRPNTracker:\n # Initialization of used values, initial bounding box, used network\n def __init__(self, net=\"dasiamrpn_model.onnx\", kernel_r1=\"dasiamrpn_kernel_r1.onnx\", kernel_cls1=\"dasiamrpn_kernel_cls1.onnx\"):\n self.windowing = \"cosine\"\n self.exemplar_size = 127\n self.instance_size = 271\n self.total_stride = 8\n self.score_size = (self.instance_size - self.exemplar_size) // self.total_stride + 1\n self.context_amount = 0.5\n self.ratios = [0.33, 0.5, 1, 2, 3]\n self.scales = [8, ]\n self.anchor_num = len(self.ratios) * len(self.scales)\n self.penalty_k = 0.055\n self.window_influence = 0.42\n self.lr = 0.295\n self.score = []\n if self.windowing == \"cosine\":\n self.window = np.outer(np.hanning(self.score_size), np.hanning(self.score_size))\n elif self.windowing == \"uniform\":\n self.window = np.ones((self.score_size, self.score_size))\n self.window = np.tile(self.window.flatten(), self.anchor_num)\n # Loading network`s and kernel`s models\n self.net = cv.dnn.readNet(net)\n self.kernel_r1 = cv.dnn.readNet(kernel_r1)\n self.kernel_cls1 = cv.dnn.readNet(kernel_cls1)\n\n def init(self, im, init_bb):\n target_pos, target_sz = np.array([init_bb[0], init_bb[1]]), np.array([init_bb[2], init_bb[3]])\n self.im_h = im.shape[0]\n self.im_w = im.shape[1]\n self.target_pos = target_pos\n self.target_sz = target_sz\n self.avg_chans = np.mean(im, axis=(0, 1))\n\n # When we trying to generate ONNX model from the pre-trained .pth model\n # we are using only one state of the network. 
In our case used state\n # with big bounding box, so we were forced to add assertion for\n # too small bounding boxes - current state of the network can not\n # work properly with such small bounding boxes\n if ((self.target_sz[0] * self.target_sz[1]) / float(self.im_h * self.im_w)) < 0.004:\n raise AssertionError(\n \"Initializing BB is too small-try to restart tracker with larger BB\")\n\n self.anchor = self.__generate_anchor()\n wc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)\n hc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)\n s_z = round(np.sqrt(wc_z * hc_z))\n z_crop = self.__get_subwindow_tracking(im, self.exemplar_size, s_z)\n z_crop = z_crop.transpose(2, 0, 1).reshape(1, 3, 127, 127).astype(np.float32)\n self.net.setInput(z_crop)\n z_f = self.net.forward('63')\n self.kernel_r1.setInput(z_f)\n r1 = self.kernel_r1.forward()\n self.kernel_cls1.setInput(z_f)\n cls1 = self.kernel_cls1.forward()\n r1 = r1.reshape(20, 256, 4, 4)\n cls1 = cls1.reshape(10, 256 , 4, 4)\n self.net.setParam(self.net.getLayerId('65'), 0, r1)\n self.net.setParam(self.net.getLayerId('68'), 0, cls1)\n\n # Сreating anchor for tracking bounding box\n def __generate_anchor(self):\n self.anchor = np.zeros((self.anchor_num, 4), dtype = np.float32)\n size = self.total_stride * self.total_stride\n count = 0\n\n for ratio in self.ratios:\n ws = int(np.sqrt(size / ratio))\n hs = int(ws * ratio)\n for scale in self.scales:\n wws = ws * scale\n hhs = hs * scale\n self.anchor[count] = [0, 0, wws, hhs]\n count += 1\n\n score_sz = int(self.score_size)\n self.anchor = np.tile(self.anchor, score_sz * score_sz).reshape((-1, 4))\n ori = - (score_sz / 2) * self.total_stride\n xx, yy = np.meshgrid([ori + self.total_stride * dx for dx in range(score_sz)], [ori + self.total_stride * dy for dy in range(score_sz)])\n xx, yy = np.tile(xx.flatten(), (self.anchor_num, 1)).flatten(), np.tile(yy.flatten(), (self.anchor_num, 1)).flatten()\n self.anchor[:, 0], self.anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)\n return self.anchor\n\n # Function for updating tracker state\n def update(self, im):\n wc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)\n hc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = self.exemplar_size / s_z\n d_search = (self.instance_size - self.exemplar_size) / 2\n pad = d_search / scale_z\n s_x = round(s_z + 2 * pad)\n\n # Region preprocessing part\n x_crop = self.__get_subwindow_tracking(im, self.instance_size, s_x)\n x_crop = x_crop.transpose(2, 0, 1).reshape(1, 3, 271, 271).astype(np.float32)\n self.score = self.__tracker_eval(x_crop, scale_z)\n self.target_pos[0] = max(0, min(self.im_w, self.target_pos[0]))\n self.target_pos[1] = max(0, min(self.im_h, self.target_pos[1]))\n self.target_sz[0] = max(10, min(self.im_w, self.target_sz[0]))\n self.target_sz[1] = max(10, min(self.im_h, self.target_sz[1]))\n\n cx, cy = self.target_pos\n w, h = self.target_sz\n updated_bb = (cx, cy, w, h)\n return True, updated_bb\n\n # Function for updating position of the bounding box\n def __tracker_eval(self, x_crop, scale_z):\n target_size = self.target_sz * scale_z\n self.net.setInput(x_crop)\n outNames = self.net.getUnconnectedOutLayersNames()\n outNames = ['66', '68']\n delta, score = self.net.forward(outNames)\n delta = np.transpose(delta, (1, 2, 3, 0))\n delta = np.ascontiguousarray(delta, dtype = np.float32)\n delta = np.reshape(delta, (4, -1))\n score = np.transpose(score, (1, 2, 3, 0))\n score = 
np.ascontiguousarray(score, dtype = np.float32)\n score = np.reshape(score, (2, -1))\n score = self.__softmax(score)[1, :]\n delta[0, :] = delta[0, :] * self.anchor[:, 2] + self.anchor[:, 0]\n delta[1, :] = delta[1, :] * self.anchor[:, 3] + self.anchor[:, 1]\n delta[2, :] = np.exp(delta[2, :]) * self.anchor[:, 2]\n delta[3, :] = np.exp(delta[3, :]) * self.anchor[:, 3]\n\n def __change(r):\n return np.maximum(r, 1./r)\n\n def __sz(w, h):\n pad = (w + h) * 0.5\n sz2 = (w + pad) * (h + pad)\n return np.sqrt(sz2)\n\n def __sz_wh(wh):\n pad = (wh[0] + wh[1]) * 0.5\n sz2 = (wh[0] + pad) * (wh[1] + pad)\n return np.sqrt(sz2)\n\n s_c = __change(__sz(delta[2, :], delta[3, :]) / (__sz_wh(target_size)))\n r_c = __change((target_size[0] / target_size[1]) / (delta[2, :] / delta[3, :]))\n penalty = np.exp(-(r_c * s_c - 1.) * self.penalty_k)\n pscore = penalty * score\n pscore = pscore * (1 - self.window_influence) + self.window * self.window_influence\n best_pscore_id = np.argmax(pscore)\n target = delta[:, best_pscore_id] / scale_z\n target_size /= scale_z\n lr = penalty[best_pscore_id] * score[best_pscore_id] * self.lr\n res_x = target[0] + self.target_pos[0]\n res_y = target[1] + self.target_pos[1]\n res_w = target_size[0] * (1 - lr) + target[2] * lr\n res_h = target_size[1] * (1 - lr) + target[3] * lr\n self.target_pos = np.array([res_x, res_y])\n self.target_sz = np.array([res_w, res_h])\n return score[best_pscore_id]\n\n def __softmax(self, x):\n x_max = x.max(0)\n e_x = np.exp(x - x_max)\n y = e_x / e_x.sum(axis = 0)\n return y\n\n # Reshaping cropped image for using in the model\n def __get_subwindow_tracking(self, im, model_size, original_sz):\n im_sz = im.shape\n c = (original_sz + 1) / 2\n context_xmin = round(self.target_pos[0] - c)\n context_xmax = context_xmin + original_sz - 1\n context_ymin = round(self.target_pos[1] - c)\n context_ymax = context_ymin + original_sz - 1\n left_pad = int(max(0., -context_xmin))\n top_pad = int(max(0., -context_ymin))\n right_pad = int(max(0., context_xmax - im_sz[1] + 1))\n bot_pad = int(max(0., context_ymax - im_sz[0] + 1))\n context_xmin += left_pad\n context_xmax += left_pad\n context_ymin += top_pad\n context_ymax += top_pad\n r, c, k = im.shape\n\n if any([top_pad, bot_pad, left_pad, right_pad]):\n te_im = np.zeros((\n r + top_pad + bot_pad, c + left_pad + right_pad, k), np.uint8)\n te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im\n if top_pad:\n te_im[0:top_pad, left_pad:left_pad + c, :] = self.avg_chans\n if bot_pad:\n te_im[r + top_pad:, left_pad:left_pad + c, :] = self.avg_chans\n if left_pad:\n te_im[:, 0:left_pad, :] = self.avg_chans\n if right_pad:\n te_im[:, c + left_pad:, :] = self.avg_chans\n im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]\n else:\n im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]\n\n if not np.array_equal(model_size, original_sz):\n im_patch_original = cv.resize(im_patch_original, (model_size, model_size))\n return im_patch_original\n\n# Sample for using DaSiamRPN tracker\ndef main():\n parser = argparse.ArgumentParser(description=\"Run tracker\")\n parser.add_argument(\"--input\", type=str, help=\"Full path to input (empty for camera)\")\n parser.add_argument(\"--net\", type=str, default=\"dasiamrpn_model.onnx\", help=\"Full path to onnx model of net\")\n parser.add_argument(\"--kernel_r1\", type=str, default=\"dasiamrpn_kernel_r1.onnx\", help=\"Full path to onnx model of kernel_r1\")\n 
parser.add_argument(\"--kernel_cls1\", type=str, default=\"dasiamrpn_kernel_cls1.onnx\", help=\"Full path to onnx model of kernel_cls1\")\n args = parser.parse_args()\n point1 = ()\n point2 = ()\n mark = True\n drawing = False\n cx, cy, w, h = 0.0, 0.0, 0, 0\n # Fucntion for drawing during videostream\n def get_bb(event, x, y, flag, param):\n nonlocal point1, point2, cx, cy, w, h, drawing, mark\n\n if event == cv.EVENT_LBUTTONDOWN:\n if not drawing:\n drawing = True\n point1 = (x, y)\n else:\n drawing = False\n\n elif event == cv.EVENT_MOUSEMOVE:\n if drawing:\n point2 = (x, y)\n\n elif event == cv.EVENT_LBUTTONUP:\n cx = point1[0] - (point1[0] - point2[0]) / 2\n cy = point1[1] - (point1[1] - point2[1]) / 2\n w = abs(point1[0] - point2[0])\n h = abs(point1[1] - point2[1])\n mark = False\n\n # Creating window for visualization\n cap = cv.VideoCapture(args.input if args.input else 0)\n cv.namedWindow(\"DaSiamRPN\")\n cv.setMouseCallback(\"DaSiamRPN\", get_bb)\n\n whitespace_key = 32\n while cv.waitKey(40) != whitespace_key:\n has_frame, frame = cap.read()\n if not has_frame:\n sys.exit(0)\n cv.imshow(\"DaSiamRPN\", frame)\n\n while mark:\n twin = np.copy(frame)\n if point1 and point2:\n cv.rectangle(twin, point1, point2, (0, 255, 255), 3)\n cv.imshow(\"DaSiamRPN\", twin)\n cv.waitKey(40)\n\n init_bb = (cx, cy, w, h)\n tracker = DaSiamRPNTracker(args.net, args.kernel_r1, args.kernel_cls1)\n tracker.init(frame, init_bb)\n\n # Tracking loop\n while cap.isOpened():\n has_frame, frame = cap.read()\n if not has_frame:\n sys.exit(0)\n _, new_bb = tracker.update(frame)\n cx, cy, w, h = new_bb\n cv.rectangle(frame, (int(cx - w // 2), int(cy - h // 2)), (int(cx - w // 2) + int(w), int(cy - h // 2) + int(h)),(0, 255, 255), 3)\n cv.imshow(\"DaSiamRPN\", frame)\n key = cv.waitKey(1)\n if key == ord(\"q\"):\n break\n\n cap.release()\n cv.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array",
"numpy.array_equal",
"numpy.reshape",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.hanning",
"numpy.copy",
"numpy.ones",
"numpy.tile",
"numpy.exp",
"numpy.mean",
"numpy.transpose",
"numpy.argmax",
"numpy.sqrt",
"numpy.maximum"
]
] |
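Inside `__tracker_eval`, the DaSiamRPN code above fuses the raw classification score with a scale/ratio penalty and the cosine window before taking the argmax. A numpy-only sketch of that fusion step on random inputs (no ONNX models involved; the 19×19 score map, 5 anchors and the 0.42/0.055 constants follow the class defaults, while `score`, `s_c` and `r_c` are random stand-ins for the network output and size-change terms):

```python
import numpy as np

anchor_num, score_size = 5, 19          # (271 - 127) // 8 + 1 = 19
window_influence, penalty_k = 0.42, 0.055

# Cosine window, tiled over the anchors exactly as in the constructor
window = np.outer(np.hanning(score_size), np.hanning(score_size))
window = np.tile(window.flatten(), anchor_num)

rng = np.random.default_rng(0)
score = rng.random(anchor_num * score_size * score_size)
s_c = rng.uniform(1.0, 2.0, score.shape)   # stand-in scale change
r_c = rng.uniform(1.0, 2.0, score.shape)   # stand-in aspect-ratio change

# Penalize large scale/ratio changes, then blend with the cosine window
penalty = np.exp(-(r_c * s_c - 1.0) * penalty_k)
pscore = penalty * score * (1 - window_influence) + window * window_influence
best = np.argmax(pscore)
print("best anchor index:", best, "penalized score:", pscore[best])
```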
ntyukaev/training_extensions | [
"c897d42e50828fea853ceda0795e1f0e7d6e9909"
] | [
"ote_cli/ote_cli/tools/utils/demo/visualization.py"
] | [
"\"\"\"\nVisualisation module.\n\"\"\"\n\n# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n\nimport cv2\nimport numpy as np\nfrom ote_sdk.entities.model_template import TaskType\n\n\ndef put_text_on_rect_bg(frame, message, position, color=(255, 255, 0)):\n \"\"\"Puts a text message on a black rectangular aread in specified position of a frame.\"\"\"\n\n font_face = cv2.FONT_HERSHEY_COMPLEX\n font_scale = 1\n thickness = 1\n color_bg = (0, 0, 0)\n x, y = position\n text_size, _ = cv2.getTextSize(message, font_face, font_scale, thickness)\n text_w, text_h = text_size\n cv2.rectangle(frame, position, (x + text_w + 1, y + text_h + 1), color_bg, -1)\n cv2.putText(\n frame,\n message,\n (x, y + text_h + font_scale - 1),\n font_face,\n font_scale,\n color,\n thickness,\n )\n return text_size\n\n\ndef draw_masks(frame, predictions, put_object_count=False):\n \"\"\"\n Converts predictions to masks and draw them on frame.\n \"\"\"\n\n frame = frame.copy()\n height, width = frame.shape[0], frame.shape[1]\n segments_image = frame.copy()\n aggregated_mask = np.zeros(frame.shape[:2], dtype=np.uint8)\n aggregated_colored_mask = np.zeros(frame.shape, dtype=np.uint8)\n for prediction in predictions:\n contours = np.array(\n [[(int(p.x * width), int(p.y * height)) for p in prediction.shape.points]]\n )\n assert len(prediction.get_labels()) == 1\n label = prediction.get_labels()[0]\n color = tuple(getattr(label.color, x) for x in (\"blue\", \"green\", \"red\"))\n mask = np.zeros(shape=(height, width), dtype=np.uint8)\n cv2.drawContours(mask, contours, -1, 255, -1)\n cv2.drawContours(frame, contours, -1, color, 1)\n rect = cv2.boundingRect(contours[0])\n cv2.rectangle(\n frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), color, 1\n )\n put_text_on_rect_bg(frame, label.name, (rect[0], rect[1]), color=color)\n cv2.bitwise_or(aggregated_mask, mask, dst=aggregated_mask)\n cv2.bitwise_or(\n aggregated_colored_mask,\n np.asarray(color, dtype=np.uint8),\n dst=aggregated_colored_mask,\n mask=mask,\n )\n # Fill the area occupied by all instances with a colored instances mask image.\n cv2.bitwise_and(\n segments_image,\n np.zeros(3, dtype=np.uint8),\n dst=segments_image,\n mask=aggregated_mask,\n )\n cv2.bitwise_or(\n segments_image,\n aggregated_colored_mask,\n dst=segments_image,\n mask=aggregated_mask,\n )\n # Blend original image with the one, where instances are colored.\n # As a result instances masks become transparent.\n cv2.addWeighted(frame, 0.5, segments_image, 0.5, 0, dst=frame)\n\n if put_object_count:\n put_text_on_rect_bg(frame, f\"Obj. 
count: {len(predictions)}\", (0, 0))\n return frame\n\n\ndef put_labels(frame, predictions):\n \"\"\"\n Converts predictions to text labels and puts them to the top left corner of a frame.\n \"\"\"\n\n frame = frame.copy()\n assert len(predictions) == 1\n # TODO (ilya-krylov): handle multi-label classification\n assert len(predictions[0].get_labels()) == 1\n label = predictions[0].get_labels()[0]\n color = tuple(getattr(label.color, x) for x in (\"blue\", \"green\", \"red\"))\n put_text_on_rect_bg(frame, label.name, (0, 0), color=color)\n return frame\n\n\ndef draw_bounding_boxes(frame, predictions, put_object_count):\n \"\"\"\n Converts predictions to bounding boxes and draws them on a frame.\n \"\"\"\n\n frame = frame.copy()\n height, width = frame.shape[0], frame.shape[1]\n for prediction in predictions:\n x1 = int(prediction.shape.x1 * width)\n x2 = int(prediction.shape.x2 * width)\n y1 = int(prediction.shape.y1 * height)\n y2 = int(prediction.shape.y2 * height)\n assert len(prediction.get_labels()) == 1\n label = prediction.get_labels()[0]\n color = tuple(getattr(label.color, x) for x in (\"blue\", \"green\", \"red\"))\n cv2.rectangle(frame, (x1, y1), (x2, y2), color, thickness=2)\n put_text_on_rect_bg(frame, label.name, (x1, y1), color=color)\n\n if put_object_count:\n put_text_on_rect_bg(frame, f\"Obj. count: {len(predictions)}\", (0, 0))\n return frame\n\n\ndef draw_predictions(task_type, predictions, frame, fit_to_size):\n \"\"\"\n Converts predictions to visual representations depending on task type and\n draws them on a frame.\n \"\"\"\n\n width, height = frame.shape[1], frame.shape[0]\n if fit_to_size:\n ratio_x = fit_to_size[0] / width\n ratio_y = fit_to_size[1] / height\n ratio = min(ratio_x, ratio_y)\n frame = cv2.resize(frame, None, fx=ratio, fy=ratio)\n if task_type == TaskType.DETECTION:\n frame = draw_bounding_boxes(frame, predictions, put_object_count=True)\n elif task_type in {TaskType.CLASSIFICATION, TaskType.ANOMALY_CLASSIFICATION}:\n frame = put_labels(frame, predictions)\n elif task_type in {TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION}:\n frame = draw_masks(frame, predictions, put_object_count=True)\n elif task_type in {TaskType.SEGMENTATION, TaskType.ANOMALY_SEGMENTATION}:\n frame = draw_masks(frame, predictions, put_object_count=False)\n else:\n raise ValueError(f\"Unknown task type: {task_type}\")\n return frame\n"
] | [
[
"numpy.asarray",
"numpy.zeros"
]
] |
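The visualisation helpers above all funnel through `put_text_on_rect_bg`, which draws a filled black rectangle sized by `cv2.getTextSize` and then writes the label on top of it. A small stand-alone sketch of the same OpenCV pattern on a dummy frame (the frame contents, message and output filename are arbitrary choices for the example):

```python
import cv2
import numpy as np

frame = np.full((240, 320, 3), 64, dtype=np.uint8)   # dummy grey frame
message, position, color = "Obj. count: 3", (10, 10), (255, 255, 0)

font_face, font_scale, thickness = cv2.FONT_HERSHEY_COMPLEX, 1, 1
(text_w, text_h), _ = cv2.getTextSize(message, font_face, font_scale, thickness)
x, y = position

# Black background rectangle sized to the text, then the text itself
cv2.rectangle(frame, position, (x + text_w + 1, y + text_h + 1), (0, 0, 0), -1)
cv2.putText(frame, message, (x, y + text_h + font_scale - 1),
            font_face, font_scale, color, thickness)
cv2.imwrite("label_demo.png", frame)
```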
luiztauffer/pandasVIS | [
"800c25c1f4aef29062a095149cedd5011262d2af"
] | [
"pandasvis/main.py"
] | [
"from PySide2 import QtCore\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtWidgets import (QWidget, QApplication, QTreeWidgetItem, QLabel,\n QMainWindow, QFileDialog, QAction, QVBoxLayout,\n QGridLayout, QPushButton, QTreeWidgetItemIterator,\n QTabWidget, QSplitter, QTextEdit, QMessageBox,\n QHBoxLayout)\nfrom pandasvis.classes.trees import QTreeCustomPrimary, QTreeCustomSecondary\nfrom pandasvis.classes.console_widget import ConsoleWidget\nfrom pandasvis.utils.load_all_modules import load_all_modules\nfrom pandasvis.classes.trees import move_to_secondary, move_to_primary\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport os\nimport sys\nimport shutil\n\n\nclass Application(QMainWindow):\n def __init__(self, filename):\n super().__init__()\n\n self.resize(1200, 900)\n self.setWindowTitle('PandasVIS')\n\n # Initialize GUI elements\n self.init_gui()\n self.max_fig = False\n\n # Opens file (if argument was passed)\n if filename is not None:\n self.open_file()\n\n # Creates temp folder for temporary files\n self.temp_dir = os.path.join(os.getcwd(), 'temp')\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir, exist_ok=True)\n\n self.init_console()\n self.console.push_vars({'self': self})\n self.load_modules()\n self.show()\n\n def init_gui(self):\n \"\"\"Initiates GUI elements.\"\"\"\n mainMenu = self.menuBar()\n # File menu\n fileMenu = mainMenu.addMenu('File')\n # Adding actions to file menu\n action_open_file = QAction('Open File', self)\n fileMenu.addAction(action_open_file)\n action_open_file.triggered.connect(lambda: self.open_file(None))\n\n self.toolsMenu = mainMenu.addMenu('Tools')\n self.tabularMenu = self.toolsMenu.addMenu('Tabular')\n self.timeseriesMenu = self.toolsMenu.addMenu('Time Series')\n\n helpMenu = mainMenu.addMenu('Help')\n action_about = QAction('About', self)\n helpMenu.addAction(action_about)\n action_about.triggered.connect(self.about)\n\n # Left panels ----------------------------------------------------------\n self.bt_markall = QPushButton('Mark all')\n self.bt_markall.clicked.connect(self.mark_all)\n self.bt_unmarkall = QPushButton('Unmark all')\n self.bt_unmarkall.clicked.connect(self.unmark_all)\n self.bt_test = QPushButton(' ')\n self.bt_test.clicked.connect(self.test)\n\n self.tree_primary = QTreeCustomPrimary(parent=self)\n self.tree_primary.setAlternatingRowColors(True)\n self.tree_primary.setHeaderLabels(['Primary Variables', 'type'])\n self.tree_primary.setToolTip(\"Columns of the Dataframe. 
Can be accessed\\n\"\n \"in the console with the variable 'df'\")\n self.tree_primary.itemClicked.connect(self.update_selected_primary)\n\n self.bt_toprimary = QPushButton('To primary')\n self.bt_toprimary.clicked.connect(self.to_primary)\n self.bt_tosecondary = QPushButton('To secondary')\n self.bt_tosecondary.clicked.connect(self.to_secondary)\n self.hbox_l1 = QHBoxLayout()\n self.hbox_l1.addWidget(self.bt_toprimary)\n self.hbox_l1.addWidget(self.bt_tosecondary)\n\n self.tree_secondary = QTreeCustomSecondary(parent=self)\n self.tree_secondary.setAlternatingRowColors(True)\n self.tree_secondary.setHeaderLabels(['Secondary Variables', 'type'])\n self.tree_secondary.setToolTip(\"Secondary variables, can be added to the Dataframe.\\n\"\n \"Can be accessed in the console with the variable \\n\"\n \"'secondary_vars'\")\n self.tree_secondary.itemClicked.connect(self.update_selected_secondary)\n\n self.df = pd.DataFrame(np.random.rand(100, 5), columns=['a', 'b', 'c', 'd', 'e'])\n names = ['aa', 'bb', 'cc']\n nm = [names[ind % 3] for ind in np.arange(100)]\n self.df['name'] = nm\n names_2 = ['abcd', 'ab789', 'another_class', 'yet_another', 'dfg65']\n nm_2 = [names_2[ind % 5] for ind in np.arange(100)]\n self.df['name_2'] = nm_2\n self.primary_names = list(self.df.keys())\n self.secondary_vars = {'var 3': np.zeros(100), 'var 4': np.zeros(100)}\n self.secondary_names = list(self.secondary_vars.keys())\n self.init_trees()\n\n self.vbox1 = QVBoxLayout()\n self.vbox1.addLayout(self.hbox_l1)\n self.vbox1.addWidget(self.tree_secondary)\n self.wbox1 = QWidget()\n self.wbox1.setLayout(self.vbox1)\n self.vsplit1 = QSplitter(Qt.Vertical)\n self.vsplit1.addWidget(self.tree_primary)\n self.vsplit1.addWidget(self.wbox1)\n\n self.grid_left1 = QGridLayout()\n self.grid_left1.setColumnStretch(5, 1)\n self.grid_left1.addWidget(self.bt_markall, 0, 0, 1, 2)\n self.grid_left1.addWidget(self.bt_unmarkall, 0, 2, 1, 2)\n self.grid_left1.addWidget(self.bt_test, 0, 4, 1, 1)\n self.grid_left1.addWidget(self.vsplit1, 1, 0, 1, 6)\n self.left_widget = QWidget()\n self.left_widget.setLayout(self.grid_left1)\n\n # Center panels -------------------------------------------------------\n # Top tabs\n self.tabs_top = QTabWidget()\n self.tab0 = QWidget()\n self.tabs_top.addTab(self.tab0, \"Tools\")\n\n # Bottom tabs\n self.tabs_bottom = QTabWidget()\n self.console = ConsoleWidget(par=self)\n self.console.setToolTip(\n \"df --> Dataframe with Primary variables\\n\"\n \"secondary_vars --> Dictionary with Secondary variables\")\n self.logger = QTextEdit()\n self.logger.setReadOnly(True)\n self.tabs_bottom.addTab(self.console, \"Console\")\n self.tabs_bottom.addTab(self.logger, \"Logger\")\n\n self.righ_widget = QSplitter(Qt.Vertical)\n self.righ_widget.addWidget(self.tabs_top)\n self.righ_widget.addWidget(self.tabs_bottom)\n\n # Window layout --------------------------------------------------------\n self.hbox = QSplitter(Qt.Horizontal)\n self.hbox.addWidget(self.left_widget) # add left panel\n self.hbox.addWidget(self.righ_widget) # add centre panel\n self.setCentralWidget(self.hbox)\n\n def test(self):\n pass\n\n def load_modules(self):\n # Main tools tab buttons layout\n self.tools_grid = QGridLayout()\n self.tools_grid.setColumnStretch(3, 1)\n self.lbl_tabular = QLabel(\"Tabular\")\n self.tools_grid.addWidget(self.lbl_tabular, 0, 0, 1, 2)\n # modules and lambdas lists\n self.instances_list = []\n self.modules_list = load_all_modules()\n self.lambdas_list = [(lambda a: lambda: self.instantiate_module(a))(o) for o in 
self.modules_list]\n for i, module in enumerate(self.modules_list):\n # Populates Menu bar\n action = QAction(module.menu_name, self)\n action.triggered.connect(self.lambdas_list[i])\n if module.menu_parent == 'None':\n self.toolsMenu.addAction(action)\n if module.menu_parent == 'Tabular':\n self.tabularMenu.addAction(action)\n elif module.menu_parent == 'Time Series':\n self.timeseriesMenu.addAction(action)\n # Populates buttons tab\n btn = QPushButton(module.menu_name)\n btn.clicked.connect(self.lambdas_list[i])\n self.tools_grid.addWidget(btn, 1, i, 1, 1)\n self.tools_grid.addWidget(QWidget(), 1, i + 1, 1, 1)\n self.tools_grid.addWidget(QWidget(), 2, 0, 1, 1)\n self.tools_grid.setRowStretch(3, 1)\n self.tab0.setLayout(self.tools_grid)\n\n def instantiate_module(self, module):\n \"\"\"Instantiates a chosen module class.\"\"\"\n obj = module(self)\n # Check how many instances of same class already exist\n nInst = sum([item.menu_name == obj.menu_name for item in self.instances_list])\n obj.name += '_' + str(nInst)\n obj.run()\n self.instances_list.append(obj)\n\n def open_file(self, filename):\n ''' Open file and store it as a Pandas Dataframe.'''\n if filename is None:\n filename, ftype = QFileDialog.getOpenFileName(None, 'Open file', '', \"(*.csv)\")\n if ftype == '(*.csv)':\n self.file_path = filename\n self.setWindowTitle('PandasVIS - ' + os.path.split(os.path.abspath(self.file_path))[1])\n # Load primary variables\n self.df = pd.read_csv(self.file_path)\n self.primary_names = self.df.keys().tolist()\n # Reset secondary variables\n self.secondary_vars = {'var 3': np.zeros(100), 'var 4': np.zeros(100)}\n self.secondary_names = list(self.secondary_vars.keys())\n # Reset GUI\n self.init_trees()\n self.init_console()\n\n def init_trees(self):\n ''' Draw hierarchical tree of fields in NWB file '''\n self.tree_primary.clear()\n self.tree_secondary.clear()\n for var1 in self.primary_names: # primary variables list\n parent = QTreeWidgetItem(self.tree_primary, [var1, str(self.df[var1].dtype)])\n parent.setFlags(parent.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)\n parent.setCheckState(0, QtCore.Qt.Checked)\n for var2 in self.secondary_names: # secondary variables list\n parent = QTreeWidgetItem(self.tree_secondary, [var2, str(self.secondary_vars[var2].dtype)])\n parent.setFlags(parent.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)\n parent.setCheckState(0, QtCore.Qt.Checked)\n\n def init_console(self):\n ''' Initialize commands on console '''\n self.console._execute(\"import pandas as pd\", True)\n self.console._execute(\"import numpy as np\", True)\n self.console._execute(\"import matplotlib.pyplot as plt\", True)\n self.console.push_vars({'df': self.df})\n self.console.push_vars({'secondary_vars': self.secondary_vars})\n self.console.clear()\n self.console.print_text('df --> Dataframe with Primary variables\\n')\n self.console.print_text('secondary_vars --> Dictionary with Secondary variables\\n\\n')\n\n def new_tab_top(self, object, title):\n \"\"\"Opens new tab.\"\"\"\n self.tabs_top.addTab(object, title)\n nTabs = self.tabs_top.children()[0].count()\n self.tabs_top.setCurrentIndex(nTabs - 1)\n\n def new_tab_bottom(self, tab_object, title):\n \"\"\"Opens new tab.\"\"\"\n self.tabs_bottom.addTab(tab_object, title)\n\n def close_tab_top(self, object):\n \"\"\"Closes tab and removes associated objects\"\"\"\n name = object.name\n # Closes all child Threads\n object.close_threads()\n # Removes tab\n curr_ind = 
self.tabs_top.children()[0].currentIndex()\n self.tabs_top.removeTab(curr_ind)\n # Removes specific object instance from list\n self.instances_list.remove(object)\n # Deletes object form memory\n object.deleteLater()\n self.write_to_logger(name + ' deleted!')\n\n def write_to_logger(self, txt):\n time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n full_txt = \"[\" + time + \"] \" + txt\n self.logger.append(full_txt)\n\n def toggle_max_figure(self):\n if self.max_fig:\n self.left_widget.setMaximumWidth(1000)\n self.left_widget.resize(self.left_widget.sizeHint())\n self.tabs_bottom.setMaximumHeight(1000)\n self.tabs_bottom.resize(self.tabs_bottom.sizeHint())\n self.max_fig = False\n else:\n self.left_widget.setMaximumWidth(0)\n self.tabs_bottom.setMaximumHeight(0)\n self.max_fig = True\n\n def mark_all(self):\n \"\"\"Iterate over all nodes of the tree and marks them.\"\"\"\n self.iterator = QTreeWidgetItemIterator(self.tree_primary, QTreeWidgetItemIterator.All)\n while self.iterator.value():\n item = self.iterator.value()\n item.setCheckState(0, QtCore.Qt.Checked)\n self.iterator += 1\n\n def unmark_all(self):\n \"\"\"Iterate over all nodes of the tree and unmarks them.\"\"\"\n self.iterator = QTreeWidgetItemIterator(self.tree_primary, QTreeWidgetItemIterator.All)\n while self.iterator.value():\n item = self.iterator.value()\n item.setCheckState(0, QtCore.Qt.Unchecked)\n self.iterator += 1\n\n def update_selected_primary(self):\n \"\"\"Iterate over all nodes of the tree and save selected items names to list\"\"\"\n self.selected_primary = []\n self.iterator = QTreeWidgetItemIterator(self.tree_primary, QTreeWidgetItemIterator.All)\n while self.iterator.value():\n item = self.iterator.value()\n if item.checkState(0) == 2: # full-box checked, add item to dictionary\n self.selected_primary.append(item.text(0))\n self.iterator += 1\n\n def update_selected_secondary(self):\n \"\"\"Iterate over all nodes of the tree and save selected items names to list\"\"\"\n self.selected_secondary = []\n self.iterator = QTreeWidgetItemIterator(self.tree_secondary, QTreeWidgetItemIterator.All)\n while self.iterator.value():\n item = self.iterator.value()\n if item.checkState(0) == 2: # full-box checked, add item to dictionary\n self.selected_secondary.append(item.text(0))\n self.iterator += 1\n\n def to_primary(self):\n self.iterator = QTreeWidgetItemIterator(self.tree_secondary, QTreeWidgetItemIterator.All)\n selected = []\n while self.iterator.value():\n item = self.iterator.value()\n if item.checkState(0) == 2: # full-box checked\n selected.append(item.text(0))\n self.iterator += 1\n for var in selected:\n move_to_primary(self, var)\n\n def to_secondary(self):\n self.iterator = QTreeWidgetItemIterator(self.tree_primary, QTreeWidgetItemIterator.All)\n selected = []\n while self.iterator.value():\n item = self.iterator.value()\n if item.checkState(0) == 2: # full-box checked\n selected.append(item.text(0))\n self.iterator += 1\n for var in selected:\n move_to_secondary(self, var)\n\n def insert_from_file():\n filename, ftype = QFileDialog.getOpenFileName(None, 'Open file', '', \"(*.csv)\")\n if ftype == '(*.csv)':\n self.file_path = filename\n # Load primary variables\n df_new = pd.read_csv(self.file_path)\n self.primary_names = self.df.keys().tolist()\n\n def closeEvent(self, event):\n \"\"\"Before exiting, deletes temporary files.\"\"\"\n shutil.rmtree(self.temp_dir, ignore_errors=False, onerror=None)\n event.accept()\n\n def about(self):\n \"\"\"About dialog.\"\"\"\n msg = QMessageBox()\n 
msg.setWindowTitle(\"About PandasVIS\")\n msg.setIcon(QMessageBox.Information)\n msg.setText(\"Version: 1.0.0 \\n\"\n \"Data exploration GUI, with Data Science and Machine Learning embedded tools.\\n \")\n msg.setInformativeText(\"<a href='https://github.com/luiztauffer/pandasVIS'>PandasVIS Github page</a>\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec_()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv) # instantiate a QtGui (holder for the app)\n if len(sys.argv) == 1:\n fname = None\n else:\n fname = sys.argv[1]\n ex = Application(filename=fname)\n sys.exit(app.exec_())\n\n\ndef main(filename=None): # If it was imported as a module\n \"\"\"Sets up QT application.\"\"\"\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv) # instantiate a QtGui (holder for the app)\n ex = Application(filename=filename)\n sys.exit(app.exec_())\n"
] | [
[
"pandas.read_csv",
"numpy.random.rand",
"numpy.zeros",
"numpy.arange"
]
] |
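The PandasVIS GUI above seeds itself in `init_gui` with a random 100×5 DataFrame, two categorical name columns and a dictionary of secondary variables. Reproducing that data model without any Qt machinery is just a few pandas/numpy calls; a sketch using the same column names as the source:

```python
import numpy as np
import pandas as pd

# Primary variables: five random float columns plus two categorical columns
df = pd.DataFrame(np.random.rand(100, 5), columns=list("abcde"))
names = ["aa", "bb", "cc"]
df["name"] = [names[i % 3] for i in np.arange(100)]
names_2 = ["abcd", "ab789", "another_class", "yet_another", "dfg65"]
df["name_2"] = [names_2[i % 5] for i in np.arange(100)]

# Secondary variables live outside the DataFrame as a plain dict
secondary_vars = {"var 3": np.zeros(100), "var 4": np.zeros(100)}
print(df.dtypes)
print(list(secondary_vars))
```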
manpen/nemo-eva | [
"079680f04da049720eb6b5817c086bc9866f5154"
] | [
"src/helpers/tail_estimation.py"
] | [
"import sys\nimport time\nimport argparse\nimport os\nimport warnings\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# =========================================\n# ========== Auxiliary Functions ==========\n# =========================================\n\ndef add_uniform_noise(data_sequence, p = 1):\n \"\"\"\n Function to add uniform random noise to a given dataset.\n Uniform noise in range [-5*10^(-p), 5*10^(-p)] is added to each\n data entry. For integer-valued sequences, p = 1.\n\n Args:\n data_sequence: numpy array of data to be processed.\n p: integer parameter controlling noise amplitude.\n\n Returns:\n numpy array with noise-added entries.\n \"\"\"\n if p < 1:\n print(\"Parameter p should be greater or equal to 1.\")\n return None\n noise = np.random.uniform(-5.*10**(-p), 5*10**(-p), size = len(data_sequence))\n randomized_data_sequence = data_sequence + noise\n # ensure there are no negative entries after noise is added\n randomized_data_sequence = randomized_data_sequence[np.where(randomized_data_sequence > 0)]\n return randomized_data_sequence\n\n\ndef get_distribution(data_sequence, number_of_bins = 30):\n \"\"\"\n Function to get a log-binned distribution of a given dataset.\n\n Args:\n data_sequence: numpy array with data to calculate\n log-binned PDF on.\n number_of_bins: number of logarithmic bins to use.\n\n Returns:\n x, y: numpy arrays containing midpoints of bins\n and corresponding PDF values.\n\n \"\"\"\n # define the support of the distribution\n lower_bound = min(data_sequence)\n upper_bound = max(data_sequence)\n # define bin edges\n log = np.log10\n lower_bound = log(lower_bound) if lower_bound > 0 else -1\n upper_bound = log(upper_bound)\n bins = np.logspace(lower_bound, upper_bound, number_of_bins)\n \n # compute the histogram using numpy\n y, __ = np.histogram(data_sequence, bins = bins, density = True)\n # for each bin, compute its midpoint\n x = bins[1:] - np.diff(bins) / 2.0\n # if bin is empty, drop it from the resulting list\n drop_indices = [i for i,k in enumerate(y) if k == 0.0]\n x = [k for i,k in enumerate(x) if i not in drop_indices]\n y = [k for i,k in enumerate(y) if i not in drop_indices]\n return x, y\n\ndef get_ccdf(degree_sequence):\n \"\"\"\n Function to get CCDF of the list of degrees.\n \n Args:\n degree_sequence: numpy array of nodes' degrees.\n\n Returns:\n uniques: unique degree values met in the sequence.\n 1-CDF: CCDF values corresponding to the unique values\n from the 'uniques' array.\n \"\"\"\n uniques, counts = np.unique(degree_sequence, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degree_sequence.size)\n return uniques[::-1], (1. - cumprob)[::-1]\n \n# ================================================\n# ========== Hill Tail Index Estimation ==========\n# ================================================\ndef get_moments_estimates_1(ordered_data):\n \"\"\"\n Function to calculate first moments array given an ordered data\n sequence. 
Decreasing ordering is required.\n\n Args:\n ordered_data: numpy array of ordered data for which\n the 1st moment (Hill estimator)\n is calculated.\n Returns:\n M1: numpy array of 1st moments (Hill estimator)\n corresponding to all possible order statistics\n of the dataset.\n\n \"\"\"\n logs_1 = np.log(ordered_data)\n logs_1_cumsum = np.cumsum(logs_1[:-1])\n k_vector = np.arange(1, len(ordered_data))\n M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]\n return M1\n\ndef get_moments_estimates_2(ordered_data):\n \"\"\"\n Function to calculate first and second moments arrays\n given an ordered data sequence. \n Decreasing ordering is required.\n\n Args:\n ordered_data: numpy array of ordered data for which\n the 1st (Hill estimator) and 2nd moments \n are calculated.\n Returns:\n M1: numpy array of 1st moments (Hill estimator)\n corresponding to all possible order statistics\n of the dataset.\n M2: numpy array of 2nd moments corresponding to all \n possible order statistics of the dataset.\n\n \"\"\"\n logs_1 = np.log(ordered_data)\n logs_2 = (np.log(ordered_data))**2\n logs_1_cumsum = np.cumsum(logs_1[:-1])\n logs_2_cumsum = np.cumsum(logs_2[:-1])\n k_vector = np.arange(1, len(ordered_data))\n M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]\n M2 = (1./k_vector)*logs_2_cumsum - (2.*logs_1[1:]/k_vector)*logs_1_cumsum\\\n + logs_2[1:]\n return M1, M2\n\ndef get_moments_estimates_3(ordered_data):\n \"\"\"\n Function to calculate first, second and third moments \n arrays given an ordered data sequence. \n Decreasing ordering is required.\n\n Args:\n ordered_data: numpy array of ordered data for which\n the 1st (Hill estimator), 2nd and 3rd moments \n are calculated.\n Returns:\n M1: numpy array of 1st moments (Hill estimator)\n corresponding to all possible order statistics\n of the dataset.\n M2: numpy array of 2nd moments corresponding to all \n possible order statistics of the dataset.\n M3: numpy array of 3rd moments corresponding to all \n possible order statistics of the dataset.\n\n \"\"\"\n logs_1 = np.log(ordered_data)\n logs_2 = (np.log(ordered_data))**2\n logs_3 = (np.log(ordered_data))**3\n logs_1_cumsum = np.cumsum(logs_1[:-1])\n logs_2_cumsum = np.cumsum(logs_2[:-1])\n logs_3_cumsum = np.cumsum(logs_3[:-1])\n k_vector = np.arange(1, len(ordered_data))\n M1 = (1./k_vector)*logs_1_cumsum - logs_1[1:]\n M2 = (1./k_vector)*logs_2_cumsum - (2.*logs_1[1:]/k_vector)*logs_1_cumsum\\\n + logs_2[1:]\n M3 = (1./k_vector)*logs_3_cumsum - (3.*logs_1[1:]/k_vector)*logs_2_cumsum\\\n + (3.*logs_2[1:]/k_vector)*logs_1_cumsum - logs_3[1:]\n # cleaning exceptional cases\n clean_indices = np.where((M2 <= 0) | (M3 == 0) | (np.abs(1.-(M1**2)/M2) < 1e-10)\\\n |(np.abs(1.-(M1*M2)/M3) < 1e-10))\n M1[clean_indices] = np.nan\n M2[clean_indices] = np.nan\n M3[clean_indices] = np.nan\n return M1, M2, M3\n\ndef hill_dbs(ordered_data, t_bootstrap = 0.5,\n r_bootstrap = 500, eps_stop = 1.0,\n verbose = False, diagn_plots = False):\n \"\"\"\n Function to perform double-bootstrap procedure for\n Hill estimator.\n\n Args:\n ordered_data: numpy array for which double-bootstrap\n is performed. Decreasing ordering is required.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. 
\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n\n Returns:\n k_star: number of order statistics optimal for estimation\n according to the double-bootstrap procedure.\n x1_arr: array of fractions of order statistics used for the\n 1st bootstrap sample.\n n1_amse: array of AMSE values produced by the 1st bootstrap\n sample.\n k1_min: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 1st bootstrap sample.\n max_index1: index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n x2_arr: array of fractions of order statistics used for the\n 2nd bootstrap sample.\n n2_amse: array of AMSE values produced by the 2nd bootstrap\n sample.\n k2_min: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 2nd bootstrap sample.\n max_index2: index of the 2nd bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n\n \"\"\"\n if verbose:\n print(\"Performing Hill double-bootstrap...\")\n n = len(ordered_data)\n eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))\n n1 = int(n**eps_bootstrap)\n samples_n1 = np.zeros(n1-1)\n good_counts1 = np.zeros(n1-1)\n k1 = None\n k2 = None\n min_index1 = 1\n min_index2 = 1\n while k2 == None:\n # first bootstrap with n1 sample size\n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n1, replace = True)\n sample[::-1].sort()\n M1, M2 = get_moments_estimates_2(sample)\n current_amse1 = (M2 - 2.*(M1)**2)**2\n samples_n1 += current_amse1\n good_counts1[np.where(current_amse1 != np.nan)] += 1\n averaged_delta = samples_n1 / good_counts1\n \n max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()\n k1 = np.nanargmin(averaged_delta[min_index1:max_index1]) + 1 + min_index1 #take care of indexing\n if diagn_plots:\n n1_amse = averaged_delta\n x1_arr = np.linspace(1./n1, 1.0, n1)\n \n # second bootstrap with n2 sample size\n n2 = int(n1*n1/float(n))\n samples_n2 = np.zeros(n2-1)\n good_counts2 = np.zeros(n2-1)\n \n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n2, replace = True)\n sample[::-1].sort()\n M1, M2 = get_moments_estimates_2(sample)\n current_amse2 = (M2 - 2.*(M1**2))**2\n samples_n2 += current_amse2\n good_counts2[np.where(current_amse2 != np.nan)] += 1\n max_index2 = (np.abs(np.linspace(1./n2, 1.0, n2) - eps_stop)).argmin()\n averaged_delta = samples_n2 / good_counts2\n \n max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()\n k2 = np.nanargmin(averaged_delta[min_index2:max_index2]) + 1 + min_index2 #take care of indexing\n if diagn_plots:\n n2_amse = averaged_delta\n x2_arr = np.linspace(1./n2, 1.0, n2)\n\n if k2 > k1:\n # print(\"Warning (Hill): k2 > k1, AMSE false minimum suspected, resampling...\")\n # move left AMSE boundary to avoid numerical issues\n min_index1 = min_index1 + int(0.005*n)\n min_index2 = min_index2 + int(0.005*n)\n k2 = None\n \n '''\n # this constant is provided in the Danielsson's paper\n # use instead of rho below if needed\n rho = (np.log(k1)/(2.*np.log(n1) - np.log(k1)))\\\n **(2.*(np.log(n1) - np.log(k1))/(np.log(n1)))\n '''\n \n # this constant is provided in Qi's paper\n rho = (1. 
- (2*(np.log(k1) - np.log(n1))/(np.log(k1))))**(np.log(k1)/np.log(n1) - 1.)\n \n k_star = (k1*k1/float(k2)) * rho\n k_star = int(np.round(k_star))\n \n # enforce k_star to pick 2nd value (rare cases of extreme cutoffs)\n if k_star == 0:\n k_star = 2\n if int(k_star) >= len(ordered_data):\n print(\"WARNING: estimated threshold k is larger than the size of data\")\n k_star = len(ordered_data)-1\n if verbose:\n print(\"--- Hill double-bootstrap information ---\")\n print(\"Size of the 1st bootstrap sample n1:\", n1)\n print(\"Size of the 2nd bootstrap sample n2:\", n2)\n print(\"Estimated k1:\", k1)\n print(\"Estimated k2:\", k2)\n print(\"Estimated constant rho:\", rho)\n print(\"Estimated optimal k:\", k_star)\n print(\"-----------------------------------------\")\n if not diagn_plots:\n x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None\n return k_star, x1_arr, n1_amse, k1/float(n1), max_index1, x2_arr, n2_amse, k2/float(n2), max_index2\n\ndef hill_estimator(ordered_data,\n bootstrap = True, t_bootstrap = 0.5,\n r_bootstrap = 500, verbose = False,\n diagn_plots = False, eps_stop = 0.99):\n \"\"\"\n Function to calculate Hill estimator for a given dataset.\n If bootstrap flag is True, double-bootstrap procedure\n for estimation of the optimal number of order statistics is\n performed.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n bootstrap: flag to switch on/off double-bootstrap procedure.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. 
\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n\n Returns:\n results: list containing an array of order statistics,\n an array of corresponding tail index estimates,\n the optimal order statistic estimated by double-\n bootstrap and the corresponding tail index,\n an array of fractions of order statistics used for\n the 1st bootstrap sample with an array of corresponding\n AMSE values, value of fraction of order statistics\n corresponding to the minimum of AMSE for the 1st bootstrap\n sample, index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter; and the same characteristics for the\n 2nd bootstrap sample.\n \"\"\"\n k_arr = np.arange(1, len(ordered_data))\n xi_arr = get_moments_estimates_1(ordered_data)\n if bootstrap:\n results = hill_dbs(ordered_data,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n verbose = verbose, \n diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results\n while k_star == None:\n print(\"Resampling...\")\n results = hill_dbs(ordered_data,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n verbose = verbose, \n diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results\n xi_star = xi_arr[k_star-1]\n #print(\"Adjusted Hill estimated gamma:\", 1 + 1./xi_star)\n #print(\"**********\")\n else:\n k_star, xi_star = None, None\n x1_arr, n1_amse, k1, max_index1 = 4*[None]\n x2_arr, n2_amse, k2, max_index2 = 4*[None]\n results = [k_arr, xi_arr, k_star, xi_star, x1_arr, n1_amse, k1, max_index1,\\\n x2_arr, n2_amse, k2, max_index2]\n return results\n\ndef smooth_hill_estimator(ordered_data, r_smooth = 2):\n \"\"\"\n Function to calculate smooth Hill estimator for a\n given ordered dataset.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n r_smooth: integer parameter controlling the width\n of smoothing window. 
Typically small\n value such as 2 or 3.\n Returns:\n k_arr: numpy array of order statistics based on the data provided.\n xi_arr: numpy array of tail index estimates corresponding to \n the order statistics array k_arr.\n \"\"\"\n n = len(ordered_data)\n M1 = get_moments_estimates_1(ordered_data)\n xi_arr = np.zeros(int(np.floor(float(n)/r_smooth)))\n k_arr = np.arange(1, int(np.floor(float(n)/r_smooth))+1)\n xi_arr[0] = M1[0]\n bin_lengths = np.array([1.]+[float((r_smooth-1)*k) for k in k_arr[:-1]])\n cum_sum = 0.0\n for i in range(1, r_smooth*int(np.floor(float(n)/r_smooth))-1):\n k = i\n cum_sum += M1[k]\n if (k+1) % (r_smooth) == 0:\n xi_arr[int(k+1)//int(r_smooth)] = cum_sum\n cum_sum -= M1[int(k+1)//int(r_smooth)]\n xi_arr = xi_arr/bin_lengths\n return k_arr, xi_arr\n\n# ===================================================\n# ========== Moments Tail Index Estimation ==========\n# ===================================================\n\ndef moments_dbs_prefactor(xi_n, n1, k1):\n \"\"\" \n Function to calculate pre-factor used in moments\n double-bootstrap procedure.\n\n Args:\n xi_n: moments tail index estimate corresponding to\n sqrt(n)-th order statistic.\n n1: size of the 1st bootstrap in double-bootstrap\n procedure.\n k1: estimated optimal order statistic based on the 1st\n bootstrap sample.\n\n Returns:\n prefactor: constant used in estimation of the optimal\n stopping order statistic for moments estimator.\n \"\"\"\n def V_sq(xi_n):\n if xi_n >= 0:\n V = 1. + (xi_n)**2\n return V\n else:\n a = (1.-xi_n)**2\n b = (1-2*xi_n)*(6*((xi_n)**2)-xi_n+1)\n c = (1.-3*xi_n)*(1-4*xi_n)\n V = a*b/c\n return V\n\n def V_bar_sq(xi_n):\n if xi_n >= 0:\n V = 0.25*(1+(xi_n)**2)\n return V\n else:\n a = 0.25*((1-xi_n)**2)\n b = 1-8*xi_n+48*(xi_n**2)-154*(xi_n**3)\n c = 263*(xi_n**4)-222*(xi_n**5)+72*(xi_n**6)\n d = (1.-2*xi_n)*(1-3*xi_n)*(1-4*xi_n)\n e = (1.-5*xi_n)*(1-6*xi_n)\n V = a*(b+c)/(d*e)\n return V\n \n def b(xi_n, rho):\n if xi_n < rho:\n a1 = (1.-xi_n)*(1-2*xi_n)\n a2 = (1.-rho-xi_n)*(1.-rho-2*xi_n)\n return a1/a2\n elif xi_n >= rho and xi_n < 0:\n return 1./(1-xi_n)\n else:\n b = (xi_n/(rho*(1.-rho))) + (1./((1-rho)**2))\n return b\n\n def b_bar(xi_n, rho):\n if xi_n < rho:\n a1 = 0.5*(-rho*(1-xi_n)**2)\n a2 = (1.-xi_n-rho)*(1-2*xi_n-rho)*(1-3*xi_n-rho)\n return a1/a2\n elif xi_n >= rho and xi_n < 0:\n a1 = 1-2*xi_n-np.sqrt((1-xi_n)*(1-2.*xi_n))\n a2 = (1.-xi_n)*(1-2*xi_n)\n return a1/a2\n else:\n b = (-1.)*((rho + xi_n*(1-rho))/(2*(1-rho)**3))\n return b\n\n rho = np.log(k1)/(2*np.log(k1) - 2.*np.log(n1))\n a = (V_sq(xi_n)) * (b_bar(xi_n, rho)**2)\n b = V_bar_sq(xi_n) * (b(xi_n, rho)**2)\n prefactor = (a/b)**(1./(1. - 2*rho))\n return prefactor\n\ndef moments_dbs(ordered_data, xi_n, t_bootstrap = 0.5,\n r_bootstrap = 500, eps_stop = 1.0,\n verbose = False, diagn_plots = False):\n \"\"\"\n Function to perform double-bootstrap procedure for \n moments estimator.\n\n Args:\n ordered_data: numpy array for which double-bootstrap\n is performed. Decreasing ordering is required.\n xi_n: moments tail index estimate corresponding to\n sqrt(n)-th order statistic.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. 
\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n \n\n Returns:\n k_star: number of order statistics optimal for estimation\n according to the double-bootstrap procedure.\n x1_arr: array of fractions of order statistics used for the\n 1st bootstrap sample.\n n1_amse: array of AMSE values produced by the 1st bootstrap\n sample.\n k1_min: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 1st bootstrap sample.\n max_index1: index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n x2_arr: array of fractions of order statistics used for the\n 2nd bootstrap sample.\n n2_amse: array of AMSE values produced by the 2nd bootstrap\n sample.\n k2_min: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 2nd bootstrap sample.\n max_index2: index of the 2nd bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n \"\"\"\n if verbose:\n print(\"Performing moments double-bootstrap...\")\n n = len(ordered_data)\n eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))\n\n # first bootstrap with n1 sample size\n n1 = int(n**eps_bootstrap)\n samples_n1 = np.zeros(n1-1)\n good_counts1 = np.zeros(n1-1)\n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n1, replace = True)\n sample[::-1].sort()\n M1, M2, M3 = get_moments_estimates_3(sample)\n xi_2 = M1 + 1. - 0.5*((1. - (M1*M1)/M2))**(-1.)\n xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))\n samples_n1 += (xi_2 - xi_3)**2\n good_counts1[np.where((xi_2 - xi_3)**2 != np.nan)] += 1\n max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()\n averaged_delta = samples_n1 / good_counts1\n k1 = np.nanargmin(averaged_delta[:max_index1]) + 1 #take care of indexing\n if diagn_plots:\n n1_amse = averaged_delta\n x1_arr = np.linspace(1./n1, 1.0, n1)\n \n\n #r second bootstrap with n2 sample size\n n2 = int(n1*n1/float(n))\n samples_n2 = np.zeros(n2-1)\n good_counts2 = np.zeros(n2-1)\n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n2, replace = True)\n sample[::-1].sort()\n M1, M2, M3 = get_moments_estimates_3(sample)\n xi_2 = M1 + 1. - 0.5*(1. - (M1*M1)/M2)**(-1.)\n xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))\n samples_n2 += (xi_2 - xi_3)**2\n good_counts2[np.where((xi_2 - xi_3)**2 != np.nan)] += 1\n max_index2 = (np.abs(np.linspace(1./n2, 1.0, n2) - eps_stop)).argmin()\n averaged_delta = samples_n2 / good_counts2\n k2 = np.nanargmin(averaged_delta[:max_index2]) + 1 #take care of indexing\n if diagn_plots:\n n2_amse = averaged_delta\n x2_arr = np.linspace(1./n2, 1.0, n2)\n \n if k2 > k1:\n print(\"WARNING(moments): estimated k2 is greater than k1! 
Re-doing bootstrap...\") \n return 9*[None]\n \n #calculate estimated optimal stopping k\n prefactor = moments_dbs_prefactor(xi_n, n1, k1)\n k_star = int((k1*k1/float(k2)) * prefactor)\n\n if int(k_star) >= len(ordered_data):\n print(\"WARNING: estimated threshold k is larger than the size of data\")\n k_star = len(ordered_data)-1\n if verbose:\n print(\"--- Moments double-bootstrap information ---\")\n print(\"Size of the 1st bootstrap sample n1:\", n1)\n print(\"Size of the 2nd bootstrap sample n2:\", n2)\n print(\"Estimated k1:\", k1)\n print(\"Estimated k2:\", k2)\n print(\"Estimated constant:\", prefactor)\n print(\"Estimated optimal k:\", k_star)\n print(\"--------------------------------------------\")\n if not diagn_plots:\n x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None\n return k_star, x1_arr, n1_amse, k1/float(n1), max_index1, x2_arr, n2_amse, k2/float(n2), max_index2\n\ndef moments_estimator(ordered_data,\n bootstrap = True, t_bootstrap = 0.5,\n r_bootstrap = 500, verbose = False,\n diagn_plots = False, eps_stop = 0.99):\n \"\"\"\n Function to calculate moments estimator for a given dataset.\n If bootstrap flag is True, double-bootstrap procedure\n for estimation of the optimal number of order statistics is\n performed.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n bootstrap: flag to switch on/off double-bootstrap procedure.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. \n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n\n Returns:\n results: list containing an array of order statistics,\n an array of corresponding tail index estimates,\n the optimal order statistic estimated by double-\n bootstrap and the corresponding tail index,\n an array of fractions of order statistics used for\n the 1st bootstrap sample with an array of corresponding\n AMSE values, value of fraction of order statistics\n corresponding to the minimum of AMSE for the 1st bootstrap\n sample, index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter; and the same characteristics for the\n 2nd bootstrap sample.\n \"\"\"\n n = len(ordered_data)\n M1, M2 = get_moments_estimates_2(ordered_data)\n xi_arr = M1 + 1. - 0.5*(1. 
- (M1*M1)/M2)**(-1)\n k_arr = np.arange(1, len(ordered_data))\n if bootstrap:\n xi_n = xi_arr[int(np.floor(n**0.5))-1]\n results = moments_dbs(ordered_data, xi_n,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n verbose = verbose, \n diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n while results[0] == None:\n print(\"Resampling...\")\n results = moments_dbs(ordered_data, xi_n,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n verbose = verbose, \n diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n k_star, x1_arr, n1_amse, k1, max_index1, x2_arr, n2_amse, k2, max_index2 = results\n xi_star = xi_arr[k_star-1]\n if xi_star <= 0:\n print (\"Moments estimated gamma: infinity (xi <= 0).\")\n else:\n print (\"Moments estimated gamma:\", 1 + 1./xi_star)\n print(\"**********\")\n else:\n k_star, xi_star = None, None\n x1_arr, n1_amse, k1, max_index1 = 4*[None]\n x2_arr, n2_amse, k2, max_index2 = 4*[None]\n results = [k_arr, xi_arr, k_star, xi_star, x1_arr, n1_amse, k1, max_index1,\\\n x2_arr, n2_amse, k2, max_index2]\n return results\n\n# =======================================================\n# ========== Kernel-type Tail Index Estimation ==========\n# =======================================================\n\ndef get_biweight_kernel_estimates(ordered_data, hsteps, alpha):\n \"\"\"\n Function to calculate biweight kernel-type estimates for tail index.\n Biweight kernel is defined as:\n phi(u) = (15/8) * (1 - u^2)^2\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n hsteps: parameter controlling number of bandwidth steps\n of the kernel-type estimator.\n alpha: parameter controlling the amount of \"smoothing\"\n for the kernel-type estimator. Should be greater\n than 0.5.\n\n Returns:\n h_arr: numpy array of fractions of order statistics included\n in kernel-type tail index estimation.\n xi_arr: numpy array with tail index estimated corresponding\n to different fractions of order statistics included\n listed in h_arr array.\n \"\"\"\n n = len(ordered_data)\n logs = np.log(ordered_data)\n differences = logs[:-1] - logs[1:]\n i_arr = np.arange(1, n)/float(n)\n i3_arr = i_arr**3\n i5_arr = i_arr**5\n i_alpha_arr = i_arr**alpha\n i_alpha2_arr = i_arr**(2.+alpha)\n i_alpha4_arr = i_arr**(4.+alpha)\n t1 = np.cumsum(i_arr*differences)\n t2 = np.cumsum(i3_arr*differences)\n t3 = np.cumsum(i5_arr*differences)\n t4 = np.cumsum(i_alpha_arr*differences)\n t5 = np.cumsum(i_alpha2_arr*differences)\n t6 = np.cumsum(i_alpha4_arr*differences)\n h_arr = np.logspace(np.log10(1./n), np.log10(1.0), hsteps)\n max_i_vector = (np.floor((n*h_arr))-2.).astype(int)\n gamma_pos = (15./(8*h_arr))*t1[max_i_vector]\\\n - (15./(4*(h_arr**3)))*t2[max_i_vector]\\\n + (15./(8*(h_arr**5)))*t3[max_i_vector]\n\n q1 = (15./(8*h_arr))*t4[max_i_vector]\\\n + (15./(8*(h_arr**5)))*t6[max_i_vector]\\\n - (15./(4*(h_arr**3)))*t5[max_i_vector]\n\n q2 = (15.*(1+alpha)/(8*h_arr))*t4[max_i_vector]\\\n + (15.*(5+alpha)/(8*(h_arr**5)))*t6[max_i_vector]\\\n - (15.*(3+alpha)/(4*(h_arr**3)))*t5[max_i_vector]\n\n xi_arr = gamma_pos -1. + q2/q1\n return h_arr, xi_arr\n\n\ndef get_triweight_kernel_estimates(ordered_data, hsteps, alpha):\n \"\"\"\n Function to calculate triweight kernel-type estimates for tail index.\n Triweight kernel is defined as:\n phi(u) = (35/16) * (1 - u^2)^3\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. 
Decreasing ordering is required.\n hsteps: parameter controlling number of bandwidth steps\n of the kernel-type estimator.\n alpha: parameter controlling the amount of \"smoothing\"\n for the kernel-type estimator. Should be greater\n than 0.5.\n\n Returns:\n h_arr: numpy array of fractions of order statistics included\n in kernel-type tail index estimation.\n xi_arr: numpy array with tail index estimated corresponding\n to different fractions of order statistics included\n listed in h_arr array.\n \"\"\"\n n = len(ordered_data)\n logs = np.log(ordered_data)\n differences = logs[:-1] - logs[1:]\n i_arr = np.arange(1, n)/float(n)\n i3_arr = i_arr**3\n i5_arr = i_arr**5\n i7_arr = i_arr**7\n i_alpha_arr = i_arr**alpha\n i_alpha2_arr = i_arr**(2.+alpha)\n i_alpha4_arr = i_arr**(4.+alpha)\n i_alpha6_arr = i_arr**(6.+alpha)\n t1 = np.cumsum(i_arr*differences)\n t2 = np.cumsum(i3_arr*differences)\n t3 = np.cumsum(i5_arr*differences)\n t4 = np.cumsum(i7_arr*differences)\n t5 = np.cumsum(i_alpha_arr*differences)\n t6 = np.cumsum(i_alpha2_arr*differences)\n t7 = np.cumsum(i_alpha4_arr*differences)\n t8 = np.cumsum(i_alpha6_arr*differences)\n h_arr = np.logspace(np.log10(1./n), np.log10(1.0), hsteps)\n max_i_vector = (np.floor((n*h_arr))-2.).astype(int)\n\n gamma_pos = (35./(16*h_arr))*t1[max_i_vector]\\\n - (105./(16*(h_arr**3)))*t2[max_i_vector]\\\n + (105./(16*(h_arr**5)))*t3[max_i_vector]\\\n - (35./(16*(h_arr**7)))*t4[max_i_vector]\n\n q1 = (35./(16*h_arr))*t5[max_i_vector]\\\n + (105./(16*(h_arr**5)))*t7[max_i_vector]\\\n - (105./(16*(h_arr**3)))*t6[max_i_vector]\\\n - (35./(16*(h_arr**7)))*t8[max_i_vector]\n\n q2 = (35.*(1+alpha)/(16*h_arr))*t5[max_i_vector] \\\n + (105.*(5+alpha)/(16*(h_arr**5)))*t7[max_i_vector] \\\n - (105.*(3+alpha)/(16*(h_arr**3)))*t6[max_i_vector] \\\n - (35.*(7+alpha)/(16*(h_arr**7)))*t8[max_i_vector]\n\n xi_arr = gamma_pos - 1. + q2/q1\n return h_arr, xi_arr\n\ndef kernel_type_dbs(ordered_data, hsteps, t_bootstrap = 0.5,\n r_bootstrap = 500, alpha = 0.6, eps_stop = 1.0,\n verbose = False, diagn_plots = False):\n \"\"\"\n Function to perform double-bootstrap procedure for \n moments estimator.\n\n Args:\n ordered_data: numpy array for which double-bootstrap\n is performed. Decreasing ordering is required.\n hsteps: parameter controlling number of bandwidth steps\n of the kernel-type estimator.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n alpha: parameter controlling the amount of \"smoothing\"\n for the kernel-type estimator. Should be greater\n than 0.5.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. 
\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n \n\n Returns:\n h_star: fraction of order statistics optimal for estimation\n according to the double-bootstrap procedure.\n x1_arr: array of fractions of order statistics used for the\n 1st bootstrap sample.\n n1_amse: array of AMSE values produced by the 1st bootstrap\n sample.\n h1: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 1st bootstrap sample.\n max_k_index1: index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n x2_arr: array of fractions of order statistics used for the\n 2nd bootstrap sample.\n n2_amse: array of AMSE values produced by the 2nd bootstrap\n sample.\n h2: value of fraction of order statistics corresponding\n to the minimum of AMSE for the 2nd bootstrap sample.\n max_k_index2: index of the 2nd bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter.\n \"\"\"\n if verbose:\n print(\"Performing kernel double-bootstrap...\")\n n = len(ordered_data)\n eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))\n \n # first bootstrap with n1 sample size\n n1 = int(n**eps_bootstrap)\n samples_n1 = np.zeros(hsteps)\n good_counts1 = np.zeros(hsteps)\n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n1, replace = True)\n sample[::-1].sort()\n _, xi2_arr = get_biweight_kernel_estimates(sample, hsteps, alpha)\n _, xi3_arr = get_triweight_kernel_estimates(sample, hsteps, alpha)\n samples_n1 += (xi2_arr - xi3_arr)**2\n good_counts1[np.where((xi2_arr - xi3_arr)**2 != np.nan)] += 1\n max_index1 = (np.abs(np.logspace(np.log10(1./n1), np.log10(1.0), hsteps) - eps_stop)).argmin()\n x1_arr = np.logspace(np.log10(1./n1), np.log10(1.0), hsteps)\n averaged_delta = samples_n1 / good_counts1\n h1 = x1_arr[np.nanargmin(averaged_delta[:max_index1])] \n if diagn_plots:\n n1_amse = averaged_delta\n \n \n # second bootstrap with n2 sample size\n n2 = int(n1*n1/float(n))\n if n2 < hsteps:\n sys.exit(\"Number of h points is larger than number \"+\\\n \"of order statistics! Please either increase \"+\\\n \"the size of 2nd bootstrap or decrease number \"+\\\n \"of h grid points.\")\n samples_n2 = np.zeros(hsteps)\n good_counts2 = np.zeros(hsteps)\n for i in range(r_bootstrap):\n sample = np.random.choice(ordered_data, n2, replace = True)\n sample[::-1].sort()\n _, xi2_arr = get_biweight_kernel_estimates(sample, hsteps, alpha)\n _, xi3_arr = get_triweight_kernel_estimates(sample, hsteps, alpha)\n samples_n2 += (xi2_arr - xi3_arr)**2\n good_counts2[np.where((xi2_arr - xi3_arr)**2 != np.nan)] += 1\n max_index2 = (np.abs(np.logspace(np.log10(1./n2), np.log10(1.0), hsteps) - eps_stop)).argmin()\n x2_arr = np.logspace(np.log10(1./n2), np.log10(1.0), hsteps)\n averaged_delta = samples_n2 / good_counts2\n h2 = x2_arr[np.nanargmin(averaged_delta[:max_index2])]\n if diagn_plots:\n n2_amse = averaged_delta\n \n A = (143.*((np.log(n1) + np.log(h1))**2)/(3*(np.log(n1) - 13. 
* np.log(h1))**2))\\\n **(-np.log(h1)/np.log(n1))\n \n h_star = (h1*h1/float(h2)) * A\n\n if h_star > 1:\n print(\"WARNING: estimated threshold is larger than the size of data!\")\n print(\"WARNING: optimal h is set to 1...\")\n h_star = 1.\n\n if verbose:\n print(\"--- Kernel-type double-bootstrap information ---\")\n print(\"Size of the 1st bootstrap sample n1:\", n1)\n print(\"Size of the 2nd bootstrap sample n2:\", n2)\n print(\"Estimated h1:\", h1)\n print(\"Estimated h2:\", h2)\n print(\"Estimated constant A:\", A)\n print(\"Estimated optimal h:\", h_star)\n print(\"------------------------------------------------\")\n if not diagn_plots:\n x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None\n if x1_arr is not None:\n max_k_index1 = x1_arr[max_index1]\n else:\n max_k_index1 = None\n if x2_arr is not None:\n max_k_index2 = x2_arr[max_index2]\n else:\n max_k_index2 = None\n return h_star, x1_arr, n1_amse, h1, max_k_index1, x2_arr, n2_amse, h2, max_k_index2\n\ndef kernel_type_estimator(ordered_data, hsteps, alpha = 0.6,\n bootstrap = True, t_bootstrap = 0.5,\n r_bootstrap = 500, verbose = False,\n diagn_plots = False, eps_stop = 0.99):\n \"\"\"\n Function to calculate kernel-type estimator for a given dataset.\n If bootstrap flag is True, double-bootstrap procedure\n for estimation of the optimal number of order statistics is\n performed.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n hsteps: parameter controlling number of bandwidth steps\n of the kernel-type estimator.\n alpha: parameter controlling the amount of \"smoothing\"\n for the kernel-type estimator. Should be greater\n than 0.5.\n bootstrap: flag to switch on/off double-bootstrap procedure.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to consider\n during the AMSE minimization step.\n verbose: flag controlling bootstrap verbosity. 
\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n\n Returns:\n results: list containing an array of fractions of order statistics,\n an array of corresponding tail index estimates,\n the optimal order statistic estimated by double-\n bootstrap and the corresponding tail index,\n an array of fractions of order statistics used for\n the 1st bootstrap sample with an array of corresponding\n AMSE values, value of fraction of order statistics\n corresponding to the minimum of AMSE for the 1st bootstrap\n sample, index of the 1st bootstrap sample's order statistics\n array corresponding to the minimization boundary set\n by eps_stop parameter; and the same characteristics for the\n 2nd bootstrap sample.\n \"\"\"\n\n n = len(ordered_data)\n h_arr, xi_arr = get_biweight_kernel_estimates(ordered_data, hsteps,\n alpha = alpha)\n if bootstrap:\n results = kernel_type_dbs(ordered_data, hsteps,\n t_bootstrap = t_bootstrap,\n alpha = alpha, r_bootstrap = r_bootstrap,\n verbose = verbose, diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n h_star, x1_arr, n1_amse, h1, max_index1, x2_arr, n2_amse, h2, max_index2 = results\n while h_star == None:\n print(\"Resampling...\")\n results = kernel_type_dbs(ordered_data, hsteps,\n t_bootstrap = t_bootstrap,\n alpha = alpha, r_bootstrap = r_bootstrap,\n verbose = verbose, diagn_plots = diagn_plots,\n eps_stop = eps_stop)\n h_star, x1_arr, n1_amse, h1, max_index1, x2_arr, n2_amse, h2, max_index2 = results\n \n #get k index which corresponds to h_star\n k_star = np.argmin(np.abs(h_arr - h_star))\n xi_star = xi_arr[k_star]\n k_arr = []\n k_star = int(np.floor(h_arr[k_star]*n))-1\n k_arr = np.floor((h_arr * n))\n if xi_star <= 0:\n print (\"Kernel-type estimated gamma: infinity (xi <= 0).\")\n else:\n print (\"Kernel-type estimated gamma:\", 1 + 1./xi_star)\n print(\"**********\")\n else:\n k_star, xi_star = None, None\n x1_arr, n1_amse, h1, max_index1 = 4*[None]\n x2_arr, n2_amse, h2, max_index2 = 4*[None]\n k_arr = np.floor(h_arr * n)\n results = [np.array(k_arr), xi_arr, k_star, xi_star, x1_arr, n1_amse, h1, max_index1,\\\n x2_arr, n2_amse, h2, max_index2]\n return results\n\n# ====================================================\n# ========== Pickands Tail Index Estimation ==========\n# ====================================================\n\ndef pickands_estimator(ordered_data):\n \"\"\"\n Function to calculate Pickands estimator for the tail index.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n\n Returns:\n k_arr: array containing order statistics used for\n Pickands estimator calculation. 
Note that only estimates\n up to floor(n/4)-th order statistic can be calculated.\n xi_arr: array containing tail index estimates corresponding\n to k-order statistics provided in k_arr.\n \"\"\"\n n = len(ordered_data)\n indices_k = np.arange(1, int(np.floor(n/4.))+1)\n indices_2k = 2*indices_k\n indices_4k = 4*indices_k\n Z_k = ordered_data[indices_k-1]\n Z_2k = ordered_data[indices_2k-1]\n Z_4k = ordered_data[indices_4k-1]\n xi_arr = (1./np.log(2)) * np.log((Z_k - Z_2k) / (Z_2k - Z_4k))\n k_arr = np.array([float(i) for i in range(1, int(np.floor(n/4.))+1)])\n return k_arr, xi_arr\n\n# ==================================================\n# ========== Plotting and Data Processing ==========\n# ==================================================\n\ndef make_plots(ordered_data, output_file_path, number_of_bins,\n r_smooth, alpha, hsteps, bootstrap_flag, t_bootstrap,\n r_bootstrap, diagn_plots, eps_stop, theta1, theta2, \n verbose, noise_flag, p_noise, savedata):\n \"\"\" \n Function to create plots and save tail index estimation data.\n\n Args:\n ordered_data: numpy array for which tail index estimation\n is performed. Decreasing ordering is required.\n output_file_path: file path to which plots should be saved.\n number_of_bins: number of log-bins for degree distribution.\n r_smooth: integer parameter controlling the width\n of smoothing window. Typically small\n value such as 2 or 3.\n alpha: parameter controlling the amount of \"smoothing\"\n for the kernel-type estimator. Should be greater\n than 0.5.\n hsteps: parameter controlling number of bandwidth steps\n of the kernel-type estimator.\n bootstrap_flag: flag to switch on/off double-bootstrap procedure.\n t_bootstrap: parameter controlling the size of the 2nd\n bootstrap. Defined from n2 = n*(t_bootstrap).\n r_bootstrap: number of bootstrap resamplings for the 1st and 2nd\n bootstraps.\n diagn_plots: flag to switch on/off generation of AMSE diagnostic\n plots.\n eps_stop: parameter controlling range of AMSE minimization.\n Defined as the fraction of order statistics to\n consider during the AMSE minimization step.\n theta1: Lower bound of plotting range, defined as\n k_min = ceil(n^theta1).\n Overwritten if plots behave badly within the range.\n theta2: Upper bound of plotting range, defined as\n k_max = floor(n^theta2).\n Overwritten if plots behave badly within the range.\n verbose: flag controlling bootstrap verbosity.\n noise_flag: Switch on/off uniform noise in range\n [-5*10^(-p), 5*10^(-p)] that is added to each\n data point. 
Used for integer-valued sequences\n with p = 1 (default = 1).\n p_noise: integer parameter controlling noise amplitude.\n savedata: Flag to save data files in the directory with plots.\n \"\"\"\n output_dir = os.path.dirname(os.path.realpath(output_file_path))\n output_name = os.path.splitext(os.path.basename(output_file_path))[0]\n # calculate log-binned PDF\n if verbose:\n print(\"Calculating PDF...\")\n t1 =time.time()\n x_pdf, y_pdf = get_distribution(ordered_data,\n number_of_bins = number_of_bins)\n t2 =time.time()\n if verbose:\n print(\"Elapsed time(PDF):\", t2-t1)\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_pdf.dat\"), \"w\") as f:\n for i in range(len(x_pdf)):\n f.write(str(x_pdf[i]) + \" \" + str(y_pdf[i]) + \"\\n\")\n\n # calculate CCDF\n if verbose:\n print(\"Calculating CCDF...\")\n t1 = time.time()\n x_ccdf, y_ccdf = get_ccdf(ordered_data)\n t2 = time.time()\n if verbose:\n print(\"Elapsed time:\", t2-t1)\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_ccdf.dat\"), \"w\") as f:\n for i in range(len(x_ccdf)):\n f.write(str(x_ccdf[i]) + \" \" + str(y_ccdf[i]) + \"\\n\")\n\n # add noise if needed\n if noise_flag:\n original_discrete_data = ordered_data\n discrete_ordered_data = ordered_data\n discrete_ordered_data[::-1].sort()\n ordered_data = add_uniform_noise(ordered_data, p = p_noise)\n ordered_data[::-1].sort()\n \n # perform Pickands estimation\n if verbose:\n print(\"Calculating Pickands...\")\n t1=time.time()\n k_p_arr, xi_p_arr = pickands_estimator(ordered_data)\n t2 =time.time()\n if verbose:\n print(\"Elapsed time (Pickands):\", t2-t1)\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_pickands.dat\"), \"w\") as f:\n for i in range(len(k_p_arr)):\n f.write(str(k_p_arr[i]) + \" \" + str(xi_p_arr[i]) + \"\\n\")\n \n\n # perform smooth Hill estimation\n if verbose:\n print(\"Calculating smooth Hill...\")\n t1=time.time()\n k_sh_arr, xi_sh_arr = smooth_hill_estimator(ordered_data,\n r_smooth = r_smooth)\n t2=time.time()\n if verbose:\n print(\"Elapsed time (smooth Hill):\", t2-t1)\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_sm_hill.dat\"), \"w\") as f:\n for i in range(len(k_sh_arr)):\n f.write(str(k_sh_arr[i]) + \" \" + str(xi_sh_arr[i]) + \"\\n\")\n \n \n # perform adjusted Hill estimation\n if verbose:\n print(\"Calculating adjusted Hill...\")\n t1 = time.time()\n hill_results = hill_estimator(ordered_data,\n bootstrap = bootstrap_flag,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n diagn_plots = diagn_plots,\n eps_stop = eps_stop, \n verbose = verbose)\n t2 =time.time()\n if verbose:\n print(\"Elapsed time (Hill):\", t2-t1)\n k_h_arr = hill_results[0]\n xi_h_arr = hill_results[1]\n k_h_star = hill_results[2]\n xi_h_star = hill_results[3]\n x1_h_arr, n1_h_amse, k1_h, max_h_index1 = hill_results[4:8]\n x2_h_arr, n2_h_amse, k2_h, max_h_index2 = hill_results[8:]\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_adj_hill_plot.dat\"), \"w\") as f:\n for i in range(len(k_h_arr)):\n f.write(str(k_h_arr[i]) + \" \" + str(xi_h_arr[i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_adj_hill_estimate.dat\"), \"w\") as f:\n f.write(str(k_h_star) + \" \" + str(xi_h_star) + \"\\n\")\n\n # perform moments estimation\n if verbose:\n print(\"Calculating moments...\")\n t1 = time.time()\n moments_results = moments_estimator(ordered_data,\n bootstrap = bootstrap_flag,\n t_bootstrap = t_bootstrap,\n 
r_bootstrap = r_bootstrap,\n diagn_plots = diagn_plots,\n eps_stop = eps_stop, \n verbose = verbose)\n t2 = time.time()\n if verbose:\n print(\"Elapsed time (moments):\", t2-t1)\n k_m_arr = moments_results[0]\n xi_m_arr = moments_results[1]\n k_m_star = moments_results[2]\n xi_m_star = moments_results[3]\n x1_m_arr, n1_m_amse, k1_m, max_m_index1 = moments_results[4:8]\n x2_m_arr, n2_m_amse, k2_m, max_m_index2 = moments_results[8:]\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_mom_plot.dat\"), \"w\") as f:\n for i in range(len(k_m_arr)):\n f.write(str(k_m_arr[i]) + \" \" + str(xi_m_arr[i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_mom_estimate.dat\"), \"w\") as f:\n f.write(str(k_m_star) + \" \" + str(xi_m_star) + \"\\n\")\n # perform kernel-type estimation\n if verbose:\n print(\"Calculating kernel-type...\")\n t1 = time.time()\n kernel_type_results = kernel_type_estimator(ordered_data, hsteps,\n alpha = alpha,\n bootstrap = bootstrap_flag,\n t_bootstrap = t_bootstrap,\n r_bootstrap = r_bootstrap,\n diagn_plots = diagn_plots,\n eps_stop = eps_stop, \n verbose = verbose)\n t2 = time.time()\n if verbose:\n print(\"Elapsed time (kernel-type):\", t2-t1)\n k_k_arr = kernel_type_results[0]\n xi_k_arr = kernel_type_results[1]\n k_k_star = kernel_type_results[2]\n xi_k_star = kernel_type_results[3]\n x1_k_arr, n1_k_amse, h1, max_k_index1 = kernel_type_results[4:8]\n x2_k_arr, n2_k_amse, h2, max_k_index2 = kernel_type_results[8:]\n\n if bootstrap_flag:\n k_k1_star = np.argmin(np.abs(k_k_arr - k_k_star))\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_kern_plot.dat\"), \"w\") as f:\n for i in range(len(k_k_arr)):\n f.write(str(k_k_arr[i]) + \" \" + str(xi_k_arr[i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_kern_estimate.dat\"), \"w\") as f:\n f.write(str(k_k_arr[k_k1_star]) + \" \" + str(xi_k_arr[k_k1_star]) + \"\\n\")\n \n # plotting part\n if verbose:\n print(\"Making plots...\")\n\n fig, axes = plt.subplots(3, 2, figsize = (12, 16))\n for ax in axes.reshape(-1):\n ax.tick_params(direction='out', length=6, width=1.5,\n labelsize = 12, which = 'major')\n ax.tick_params(direction='out', length=3, width=1, which = 'minor')\n [i.set_linewidth(1.5) for i in ax.spines.values()]\n\n # plot PDF\n axes[0,0].set_xlabel(r\"Degree $k$\", fontsize = 20)\n axes[0,0].set_ylabel(r\"$P(k)$\", fontsize = 20)\n axes[0,0].loglog(x_pdf, y_pdf, color = \"#386cb0\", marker = \"s\",\n lw = 1.5, markeredgecolor = \"black\")\n\n # plot CCDF\n axes[0,1].set_xlabel(r\"Degree $k$\", fontsize = 20)\n axes[0,1].set_ylabel(r\"$\\bar{F}(k)$\", fontsize = 20)\n axes[0,1].set_xscale(\"log\")\n axes[0,1].set_yscale(\"log\")\n axes[0,1].step(x_ccdf, y_ccdf, color = \"#386cb0\", lw = 1.5)\n \n # draw scalings\n if noise_flag:\n xmin = discrete_ordered_data[k_h_star]\n else:\n xmin = ordered_data[k_h_star]\n x = x_ccdf[np.where(x_ccdf >= xmin)]\n l = np.mean(y_ccdf[np.where(x == xmin)])\n alpha = 1./xi_h_star\n if xi_h_star > 0:\n axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],\n color = '#fb8072', ls = '--', lw = 2,\n label = r\"Adj. 
Hill Scaling $(\\alpha=\"+\\\n str(np.round(1./xi_h_star, decimals = 3))+r\")$\")\n axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],\n color = \"#fb8072\", ls = 'none', marker = 'o',\n markerfacecolor = 'none', markeredgecolor = \"#fb8072\",\n markeredgewidth = 3, markersize = 10)\n if noise_flag:\n xmin = discrete_ordered_data[k_m_star]\n else:\n xmin = ordered_data[k_m_star]\n x = x_ccdf[np.where(x_ccdf >= xmin)]\n l = np.mean(y_ccdf[np.where(x == xmin)])\n alpha = 1./xi_m_star\n if xi_m_star > 0:\n axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],\n color = '#8dd3c7', ls = '--', lw = 2,\n label = r\"Moments Scaling $(\\alpha=\"+\\\n str(np.round(1./xi_m_star, decimals = 3))+r\")$\")\n axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],\n color = \"#8dd3c7\", ls = 'none', marker = 'o',\n markerfacecolor = 'none', markeredgecolor = \"#8dd3c7\",\n markeredgewidth = 3, markersize = 10)\n if noise_flag:\n xmin = discrete_ordered_data[k_k_star]\n else:\n xmin = ordered_data[k_k_star]\n \n x = x_ccdf[np.where(x_ccdf >= xmin)]\n l = np.mean(y_ccdf[np.where(x == xmin)])\n alpha = 1./xi_k_star\n if xi_k_star > 0:\n axes[0,1].plot(x, [l*(float(xmin)/k)**alpha for k in x],\n color = '#fdb462', ls = '--', lw = 2,\n label = r\"Kernel Scaling $(\\alpha=\"+\\\n str(np.round(1./xi_k_star, decimals = 3))+r\")$\")\n axes[0,1].plot((x[-1]), [l*(float(xmin)/x[-1])**(alpha)],\n color = \"#8dd3c7\", ls = 'none', marker = 'o',\n markerfacecolor = 'none', markeredgecolor = \"#fdb462\",\n markeredgewidth = 3, markersize = 10)\n axes[0,1].legend(loc = 'best')\n\n # define min and max order statistics to plot\n min_k = int(np.ceil(len(k_h_arr)**theta1)) - 1\n max_k = int(np.floor(len(k_h_arr)**theta2)) - 1\n # check if estimators' values are not too off in these bounds\n min_k_index = (np.abs(k_sh_arr - min_k)).argmin()\n max_k_index = (np.abs(k_sh_arr - max_k)).argmin()\n if (xi_sh_arr[min_k_index] <= -3 or xi_sh_arr[min_k_index] >= 3):\n indices_to_plot_sh = np.where((xi_sh_arr <= 3) & (xi_sh_arr >= -3))\n elif (xi_sh_arr[max_k_index] <= -3 or xi_sh_arr[max_k_index] >= 3):\n indices_to_plot_sh = np.where((xi_sh_arr <= 3) & (xi_sh_arr >= -3))\n else:\n indices_to_plot_sh = np.where((k_sh_arr <= max_k) & (k_sh_arr >= min_k))\n axes[1,0].set_xlabel(r\"Number of Order Statistics $\\kappa$\", fontsize = 20)\n axes[1,0].set_ylabel(r\"Estimated $\\hat{\\xi}$\", fontsize = 20) \n # plot smooth Hill\n \n axes[1,0].plot(k_sh_arr[indices_to_plot_sh], xi_sh_arr[indices_to_plot_sh],\n color = \"#b3de69\", alpha = 0.8, label = \"Smooth Hill\",\n zorder = 10)\n\n # plot adjusted Hill\n # check if estimators' values are not too off in these bounds\n if (xi_h_arr[min_k-1] <= -3 or xi_h_arr[min_k-1] >= 3):\n indices_to_plot_h = np.where((xi_h_arr <= 3) & (xi_h_arr >= -3))\n elif (xi_h_arr[max_k-1] <= -3 or xi_h_arr[max_k-1] >= 3):\n indices_to_plot_h = np.where((xi_h_arr <= 3) & (xi_h_arr >= -3))\n else:\n indices_to_plot_h = np.where((k_h_arr <= max_k) & (k_h_arr >= min_k))\n axes[1,0].plot(k_h_arr[indices_to_plot_h], xi_h_arr[indices_to_plot_h],\n color = \"#fb8072\", alpha = 0.8, label = \"Adjusted Hill\",\n zorder = 10)\n if bootstrap_flag:\n axes[1,0].scatter([k_h_arr[k_h_star-1]], [xi_h_arr[k_h_star-1]],\n color = \"#fb8072\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Hill}=\"\\\n +str(np.round([xi_h_arr[k_h_star-1]][0], decimals = 3))\\\n +r\"$\")\n axes[1,0].legend(loc = \"best\")\n\n \n axes[1,1].set_xlabel(r\"Number of Order Statistics 
$\\kappa$\", fontsize = 20)\n axes[1,1].set_ylabel(r\"Estimated $\\hat{\\xi}$\", fontsize = 20) \n axes[1,1].set_xscale(\"log\") \n \n # plot smooth Hill\n axes[1,1].plot(k_sh_arr[indices_to_plot_sh], xi_sh_arr[indices_to_plot_sh],\n color = \"#b3de69\", alpha = 0.8, label = \"Smooth Hill\",\n zorder = 10)\n # plot adjusted Hill\n indices_to_plot = np.where((k_h_arr <= max_k) & (k_h_arr >= min_k))\n axes[1,1].plot(k_h_arr[indices_to_plot_h], xi_h_arr[indices_to_plot_h],\n color = \"#fb8072\", alpha = 0.8, label = \"Adjusted Hill\",\n zorder = 10)\n if bootstrap_flag:\n axes[1,1].scatter([k_h_arr[k_h_star-1]], [xi_h_arr[k_h_star-1]],\n color = \"#fb8072\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Hill}=\"\\\n +str(np.round([xi_h_arr[k_h_star-1]][0], decimals = 3))\\\n +r\"$\")\n axes[1,1].legend(loc = \"best\")\n\n axes[2,0].set_xlabel(r\"Number of Order Statistics $\\kappa$\", fontsize = 20)\n axes[2,0].set_ylabel(r\"Estimated $\\hat{\\xi}$\", fontsize = 20)\n #plot Pickands\n min_k_index = (np.abs(k_p_arr - min_k)).argmin()\n max_k_index = (np.abs(k_p_arr - max_k)).argmin()\n if (xi_p_arr[min_k_index] <= -3 or xi_p_arr[min_k_index] >= 3):\n indices_to_plot_p = np.where((xi_p_arr <= 3) & (xi_p_arr >= -3))\n elif (xi_p_arr[max_k_index] <= -3 or xi_p_arr[max_k_index] >= 3):\n indices_to_plot_p = np.where((xi_p_arr <= 3) & (xi_p_arr >= -3))\n else:\n indices_to_plot_p = np.where((k_p_arr <= max_k) & (k_p_arr >= min_k))\n axes[2,0].plot(k_p_arr[indices_to_plot_p], xi_p_arr[indices_to_plot_p],\n color = \"#bc80bd\", alpha = 0.8, label = \"Pickands\",\n zorder = 10)\n #plot moments\n if (xi_m_arr[min_k-1] <= -3 or xi_m_arr[min_k-1] >= 3):\n indices_to_plot_m = np.where((xi_m_arr <= 3) & (xi_m_arr >= -3))\n elif (xi_m_arr[max_k-1] <= -3 or xi_m_arr[max_k-1] >= 3):\n indices_to_plot_m = np.where((xi_m_arr <= 3) & (xi_m_arr >= -3))\n else:\n indices_to_plot_m = np.where((k_m_arr <= max_k) & (k_m_arr >= min_k))\n \n axes[2,0].plot(k_m_arr[indices_to_plot_m], xi_m_arr[indices_to_plot_m],\n color = \"#8dd3c7\", alpha = 0.8, label = \"Moments\",\n zorder = 10)\n if bootstrap_flag:\n axes[2,0].scatter([k_m_arr[k_m_star-1]], [xi_m_arr[k_m_star-1]],\n color = \"#8dd3c7\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Moments}=\"\\\n +str(np.round([xi_m_arr[k_m_star-1]][0], decimals = 3))\\\n +r\"$\")\n #plot kernel-type\n min_k_index = (np.abs(k_k_arr - min_k)).argmin()\n max_k_index = (np.abs(k_k_arr - max_k)).argmin()\n if (xi_k_arr[min_k_index] <= -3 or xi_k_arr[min_k_index] >= 3):\n indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))\n elif (xi_k_arr[max_k_index] <= -3 or xi_k_arr[max_k_index] >= 3):\n indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))\n else:\n indices_to_plot_k = list(range(min_k_index, max_k_index))\n #indices_to_plot_k = np.where((xi_k_arr <= 3) & (xi_k_arr >= -3))\n axes[2,0].plot(k_k_arr[indices_to_plot_k], xi_k_arr[indices_to_plot_k],\n color = \"#fdb462\", alpha = 0.8, label = \"Kernel\",\n zorder = 10)\n if bootstrap_flag:\n axes[2,0].scatter([k_k_arr[k_k1_star-1]], [xi_k_arr[k_k1_star-1]],\n color = \"#fdb462\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Kernel}=\"\\\n +str(np.round([xi_k_arr[k_k1_star-1]][0], decimals = 3))\\\n +r\"$\")\n axes[2,0].legend(loc = \"best\")\n # for clarity purposes, display only xi region between -1 and 1\n axes[2,0].set_ylim((-0.5,1.5))\n\n 
axes[2,1].set_xlabel(r\"Number of Order Statistics $\\kappa$\", fontsize = 20)\n axes[2,1].set_ylabel(r\"Estimated $\\hat{\\xi}$\", fontsize = 20)\n axes[2,1].set_xscale(\"log\")\n\n #plot Pickands\n axes[2,1].plot(k_p_arr[indices_to_plot_p], xi_p_arr[indices_to_plot_p],\n color = \"#bc80bd\", alpha = 0.8, label = \"Pickands\",\n zorder = 10)\n #plot moments\n axes[2,1].plot(k_m_arr[indices_to_plot_m], xi_m_arr[indices_to_plot_m],\n color = \"#8dd3c7\", alpha = 0.8, label = \"Moments\",\n zorder = 10)\n if bootstrap_flag:\n axes[2,1].scatter([k_m_arr[k_m_star-1]], [xi_m_arr[k_m_star-1]],\n color = \"#8dd3c7\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Moments}=\"\\\n +str(np.round([xi_m_arr[k_m_star-1]][0], decimals = 3))\\\n +r\"$\")\n #plot kernel-type\n axes[2,1].plot(k_k_arr[indices_to_plot_k], xi_k_arr[indices_to_plot_k],\n color = \"#fdb462\", alpha = 0.8, label = \"Kernel\",\n zorder = 10)\n if bootstrap_flag:\n axes[2,1].scatter([k_k_arr[k_k1_star-1]], [xi_k_arr[k_k1_star-1]],\n color = \"#fdb462\", marker = \"*\", s = 100,\n edgecolor = \"black\", zorder = 20, \n label = r\"$\\widehat{\\xi}^{Kernel}=\"\\\n +str(np.round([xi_k_arr[k_k1_star-1]][0], decimals = 3))\\\n +r\"$\")\n # for clarity purposes, display only xi region between -1 and 1\n axes[2,1].set_ylim((-0.5,1.5))\n axes[2,1].legend(loc = \"best\")\n\n if diagn_plots:\n fig_d, axes_d = plt.subplots(1, 3, figsize = (18, 6))\n\n # filter out boundary values using theta parameters for Hill\n min_k1 = 2\n max_k1 = len(x1_h_arr) - 1\n min_k2 = 2\n max_k2 = len(x2_h_arr) - 1\n axes_d[0].set_yscale(\"log\")\n axes_d[0].set_xscale(\"log\")\n axes_d[1].set_xscale(\"log\")\n axes_d[2].set_xscale(\"log\")\n n1_h_amse[np.where((n1_h_amse == np.inf) |\\\n (n1_h_amse == -np.inf))] = np.nan\n axes_d[0].set_ylim((0.1*np.nanmin(n1_h_amse[min_k1:max_k1]), 1.0))\n axes_d[0].set_xlabel(\"Fraction of Bootstrap Order Statistics\",\n fontsize = 20)\n axes_d[0].set_ylabel(r\"$\\langle AMSE \\rangle$\", fontsize = 20)\n axes_d[0].set_title(\"Adjusted Hill Estimator\", fontsize = 20)\n # plot AMSE and corresponding minimum\n axes_d[0].plot(x1_h_arr[min_k1:max_k1], n1_h_amse[min_k1:max_k1],\n alpha = 0.5, lw = 1.5,\n color = \"#d55e00\", label = r\"$n_1$ samples\")\n axes_d[0].scatter([k1_h], [n1_h_amse[int(len(x1_h_arr)*k1_h)-1]],\n color = \"#d55e00\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_1$ sample\")\n axes_d[0].plot(x2_h_arr[min_k2:max_k2], n2_h_amse[min_k2:max_k2],\n alpha = 0.5, lw = 1.5,\n color = \"#0072b2\", label = r\"$n_2$ samples\")\n axes_d[0].scatter([k2_h], [n2_h_amse[int(len(x2_h_arr)*k2_h)-1]],\n color = \"#0072b2\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_2$ sample\")\n axes_d[0].axvline(max_h_index1/float(len(x1_h_arr)), color = \"#d55e00\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_1$ sample\")\n axes_d[0].axvline(max_h_index2/float(len(x2_h_arr)), color = \"#0072b2\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_2$ sample\")\n \n\n axes_d[0].legend(loc = \"best\")\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_adjhill_diagn1.dat\"), \"w\") as f:\n for i in range(len(x1_h_arr[min_k1:max_k1])):\n f.write(str(x1_h_arr[min_k1:max_k1][i]) + \" \" + str(n1_h_amse[min_k1:max_k1][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_adjhill_diagn2.dat\"), \"w\") as f:\n for i in 
range(len(x2_h_arr[min_k2:max_k2])):\n f.write(str(x2_h_arr[min_k2:max_k2][i]) + \" \" + str(n2_h_amse[min_k2:max_k2][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_adjhill_diagn_points.dat\"), \"w\") as f:\n f.write(\"Min for n1 sample: \"+str(k1_h)+\" \"+str(n1_h_amse[int(len(x1_h_arr)*k1_h)-1])+\"\\n\")\n f.write(\"Min for n2 sample: \"+str(k2_h)+\" \"+str(n2_h_amse[int(len(x2_h_arr)*k2_h)-1])+\"\\n\")\n f.write(\"Minimization boundary for n1 sample: \"+str(max_h_index1/float(len(x1_h_arr)))+\"\\n\")\n f.write(\"Minimization boundary for n2 sample: \"+str(max_h_index2/float(len(x2_h_arr)))+\"\\n\")\n\n # filter out boundary values using theta parameters for moments\n min_k1 = 2\n max_k1 = len(x1_m_arr) - 1\n min_k2 = 2\n max_k2 = len(x2_m_arr) - 1\n n1_m_amse[np.where((n1_m_amse == np.inf) |\\\n (n1_m_amse == -np.inf))] = np.nan\n axes_d[1].set_yscale(\"log\")\n axes_d[1].set_ylim((0.1*np.nanmin(n1_m_amse[min_k1:max_k1]), 1.0))\n axes_d[1].set_xlabel(\"Fraction of Bootstrap Order Statistics\",\n fontsize = 20)\n axes_d[1].set_ylabel(r\"$\\langle AMSE \\rangle$\", fontsize = 20)\n axes_d[1].set_title(\"Moments Estimator\", fontsize = 20)\n # plot AMSE and corresponding minimum\n axes_d[1].plot(x1_m_arr[min_k1:max_k1], n1_m_amse[min_k1:max_k1],\n alpha = 0.5, lw = 1.5,\n color = \"#d55e00\", label = r\"$n_1$ samples\")\n axes_d[1].scatter([k1_m], [n1_m_amse[int(len(x1_m_arr)*k1_m)-1]],\n color = \"#d55e00\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_1$ sample\")\n axes_d[1].plot(x2_m_arr[min_k2:max_k2], n2_m_amse[min_k2:max_k2],\n alpha = 0.5, lw = 1.5,\n color = \"#0072b2\", label = r\"$n_2$ samples\")\n axes_d[1].scatter([k2_m], [n2_m_amse[int(len(x2_m_arr)*k2_m)-1]],\n color = \"#0072b2\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_2$ sample\")\n axes_d[1].axvline(max_m_index1/float(len(x1_m_arr)), color = \"#d55e00\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_1$ sample\")\n axes_d[1].axvline(max_m_index2/float(len(x2_m_arr)), color = \"#0072b2\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_2$ sample\")\n axes_d[1].legend(loc = \"best\")\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_mom_diagn1.dat\"), \"w\") as f:\n for i in range(len(x1_m_arr[min_k1:max_k1])):\n f.write(str(x1_m_arr[min_k1:max_k1][i]) + \" \" + str(n1_m_amse[min_k1:max_k1][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_mom_diagn2.dat\"), \"w\") as f:\n for i in range(len(x2_m_arr[min_k2:max_k2])):\n f.write(str(x2_m_arr[min_k2:max_k2][i]) + \" \" + str(n2_m_amse[min_k2:max_k2][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_mom_diagn_points.dat\"), \"w\") as f:\n f.write(\"Min for n1 sample: \"+str(k1_m)+\" \"+str(n1_m_amse[int(len(x1_m_arr)*k1_m)-1])+\"\\n\")\n f.write(\"Min for n2 sample: \"+str(k2_m)+\" \"+str(n2_m_amse[int(len(x2_m_arr)*k2_m)-1])+\"\\n\")\n f.write(\"Minimization boundary for n1 sample: \"+str(max_m_index1/float(len(x1_m_arr)))+\"\\n\")\n f.write(\"Minimization boundary for n2 sample: \"+str(max_m_index2/float(len(x2_m_arr)))+\"\\n\") \n\n\n min_k1 = 2\n max_k1 = len(x1_k_arr)\n min_k2 = 2\n max_k2 = len(x2_k_arr)\n n1_k_amse[np.where((n1_k_amse == np.inf) |\\\n (n1_k_amse == -np.inf))] = np.nan\n axes_d[2].set_yscale(\"log\")\n axes_d[2].set_ylim((0.1*np.nanmin(n1_k_amse[min_k1:max_k1]), 1.0))\n axes_d[2].set_xlabel(\"Fraction of Bootstrap Order Statistics\",\n fontsize = 20)\n 
axes_d[2].set_ylabel(r\"$\\langle AMSE \\rangle$\", fontsize = 20)\n axes_d[2].set_title(\"Kernel-type Estimator\", fontsize = 20)\n # plot AMSE and corresponding minimum\n axes_d[2].plot(x1_k_arr[min_k1:max_k1], n1_k_amse[min_k1:max_k1],\n alpha = 0.5, lw = 1.5,\n color = \"#d55e00\", label = r\"$n_1$ samples\")\n axes_d[2].scatter([h1], [n1_k_amse[np.where(x1_k_arr == h1)]], color = \"#d55e00\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_1$ sample\")\n # plot boundary of minimization\n axes_d[2].axvline(max_k_index1, color = \"#d55e00\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_2$ sample\")\n axes_d[2].plot(x2_k_arr[min_k2:max_k2], n2_k_amse[min_k2:max_k2],\n alpha = 0.5, lw = 1.5,\n color = \"#0072b2\", label = r\"$n_2$ samples\")\n axes_d[2].scatter([h2], [n2_k_amse[np.where(x2_k_arr == h2)]], color = \"#0072b2\",\n marker = 'o', edgecolor = \"black\", alpha = 0.5,\n label = r\"Min for $n_2$ sample\")\n axes_d[2].axvline(max_k_index2, color = \"#0072b2\",\n ls = '--', alpha = 0.5,\n label = r\"Minimization boundary for $n_2$ sample\")\n axes_d[2].legend(loc = \"best\")\n if savedata == 1:\n with open(os.path.join(output_dir+\"/\"+output_name+\"_kern_diagn1.dat\"), \"w\") as f:\n for i in range(len(x1_k_arr[min_k1:max_k1])):\n f.write(str(x1_k_arr[min_k1:max_k1][i]) + \" \" + str(n1_k_amse[min_k1:max_k1][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_kern_diagn2.dat\"), \"w\") as f:\n for i in range(len(x2_m_arr[min_k2:max_k2])):\n f.write(str(x2_k_arr[min_k2:max_k2][i]) + \" \" + str(n2_k_amse[min_k2:max_k2][i]) + \"\\n\")\n with open(os.path.join(output_dir+\"/\"+output_name+\"_kern_diagn_points.dat\"), \"w\") as f:\n f.write(\"Min for n1 sample: \"+str(h1)+\" \"+str(n1_k_amse[np.where(x1_k_arr == h1)][0])+\"\\n\")\n f.write(\"Min for n2 sample: \"+str(h2)+\" \"+str(n2_k_amse[np.where(x2_k_arr == h2)][0])+\"\\n\")\n f.write(\"Minimization boundary for n1 sample: \"+str(n1_k_amse[int(max_k_index1*hsteps)-1])+\"\\n\")\n f.write(\"Minimization boundary for n2 sample: \"+str(n2_k_amse[int(max_k_index2*hsteps)-1])+\"\\n\")\n\n fig_d.tight_layout()\n diag_plots_path = output_dir+\"/\"+output_name+\"_diag.pdf\"\n fig_d.savefig(diag_plots_path)\n\n fig.tight_layout(pad = 0.2)\n fig.savefig(output_file_path)\n\n# ==========================\n# ========== Main ==========\n# ==========================\n\ndef main():\n #ignore warnings other than explicit ones\n warnings.filterwarnings(\"ignore\")\n parser = argparse.ArgumentParser(description =\n \"Script to compute tail index estimates\\\n for a provided dataset.\")\n parser.add_argument(\"sequence_file_path\",\n help = \"Path to a data sequence.\", type = str)\n parser.add_argument(\"output_file_path\",\n help = \"Output path for plots. 
Use either PDF or\\\n PNG format.\", type = str)\n parser.add_argument(\"--nbins\",\n help = \"Number of bins for degree\\\n distribution (default = 30)\", type = int,\n default = 30)\n parser.add_argument(\"--rsmooth\",\n help = \"Smoothing parameter for smooth Hill estimator\\\n (default = 2)\",\n type = int, default = 2)\n parser.add_argument(\"--alphakernel\",\n help = \"Alpha parameter used for kernel-type estimator.\\\n Should be greater than 0.5 (default = 0.6).\",\n type = float, default = 0.6)\n parser.add_argument(\"--hsteps\",\n help = \"Parameter to select number of bandwidth\\\n steps for kernel-type estimator, (default = 200).\",\n type = int, default = 200)\n parser.add_argument(\"--noise\",\n help = \"Switch on/off uniform noise in range\\\n [-5*10^(-p), 5*10^(-p)] that is added to each\\\n data point. Used for integer-valued sequences\\\n with p = 1 (default = 1).\",\n type = int, default = 1)\n parser.add_argument(\"--pnoise\",\n help = \"Uniform noise parameter corresponding to\\\n the rounding error of the data sequence. For integer\\\n values it equals to 1. (default = 1).\",\n type = int, default = 1)\n parser.add_argument(\"--bootstrap\",\n help = \"Flag to switch on/off double-bootstrap\\\n algorithm for defining optimal order statistic\\\n of Hill, moments and kernel-type estimators.\\\n (default = 1)\",\n type = int, default = 1)\n parser.add_argument(\"--tbootstrap\",\n help = \"Fraction of bootstrap samples in the 2nd\\\n bootstrap defined as n*tbootstrap, i.e., for\\\n n*0.5 a n/2 is the size of a single bootstrap\\\n sample (default = 0.5).\",\n type = float, default = 0.5)\n parser.add_argument(\"--rbootstrap\",\n help = \"Number of bootstrap resamplings used in\\\n double-bootstrap. Note that each sample results\\\n are stored in an array, so be careful about the\\\n memory (default = 500).\",\n type = int, default = 500)\n parser.add_argument(\"--amseborder\",\n help = \"Upper bound for order statistic to consider\\\n for double-bootstrap AMSE minimizer.\\\n Entries that are smaller or equal to the border value\\\n are ignored during AMSE minimization (default = 1).\",\n type = float, default = 1.0)\n parser.add_argument(\"--theta1\",\n help = \"Lower bound of plotting range, defined as\\\n k_min = ceil(n^theta1), (default = 0.01).\\\n Overwritten if plots behave badly within the range.\",\n type = float, default = 0.01)\n parser.add_argument(\"--theta2\",\n help = \"Upper bound of plotting range, defined as\\\n k_max = floor(n^theta2), (default = 0.99).\\\n Overwritten if plots behave badly within the range.\",\n type = float, default = 0.99)\n parser.add_argument(\"--diagplots\",\n help = \"Flag to switch on/off plotting AMSE statistics\\\n for Hill/moments/kernel-type double-bootstrap algorithm.\\\n Used for diagnostics when double-bootstrap provides unstable\\\n results. 
Can be used to find proper amseborder parameter.\\\n (default = 0).\",\n type = int, default = 0)\n parser.add_argument(\"--verbose\",\n help = \"Verbosity of bootstrap procedure.\\\n (default = 0).\",\n type = int, default = 0)\n parser.add_argument(\"--savedata\",\n help = \"Flag to save data files in the directory\\\n with plots.\\\n (default = 0)\",\n type = int, default = 0)\n parser.add_argument(\"--delimiter\",\n help = \"Delimiter used in the input file.\\\n Options are: whitespace, tab, comma, semicolon.\",\n type = str, default = \"whitespace\")\n args = parser.parse_args()\n\n # check arguments for consistency\n if args.nbins <= 0:\n parser.error(\"Number of bins should be greater than 0.\")\n if args.rsmooth < 2:\n parser.error(\"r_smooth should be greater than 1.\")\n if args.alphakernel <= 0.5:\n parser.error(\"alpha of kernel estimator should be grater than 0.5.\")\n if args.hsteps <= 0:\n parser.error(\"hsteps should be greater than 0.\")\n if args.noise != 0 and args.noise != 1:\n parser.error(\"noise flag should be 0 or 1.\")\n if args.pnoise < 0:\n parser.error(\"pnoise parameter should be greater or equal to 0.\")\n if args.bootstrap != 0 and args.bootstrap != 1:\n parser.error(\"bootstrap flag should be 0 or 1.\")\n if args.tbootstrap <= 0.0 or args.tbootstrap >= 1.0:\n parser.error(\"tbootstrap should be in range (0, 1).\")\n if args.rbootstrap <= 0:\n parser.error(\"Number of bootstrap resamples should be greater than 0.\")\n if args.amseborder <= 0.0:\n parser.error(\"amseborder should be greater than 0.\")\n if args.diagplots != 0 and args.diagplots != 1:\n parser.error(\"diagplots flag should be 0 or 1.\")\n if args.verbose != 0 and args.verbose != 1:\n parser.error(\"verbose flag should be 0 or 1.\")\n if args.savedata != 0 and args.savedata != 1:\n parser.error(\"savedata flag should be 0 or 1.\")\n if args.theta1 < 0.0 or args.theta1 > 1.0:\n parser.error(\"Theta parameters should be in [0,1] range, where theta1 < theta2.\")\n if args.theta2 < 0.0 or args.theta2 > 1.0:\n parser.error(\"Theta parameters should be in [0,1] range, where theta1 < theta2.\")\n if args.theta2 <= args.theta1:\n parser.error(\"Theta parameters should be in [0,1] range, where theta1 < theta2.\")\n if args.delimiter not in set(['whitespace', 'tab', 'comma', 'semicolon']):\n parser.error(\"Delimiter provided is not supported.\")\n\n number_of_bins = args.nbins\n r_smooth = args.rsmooth\n alpha = args.alphakernel\n hsteps = args.hsteps\n if args.noise == 1:\n noise_flag = True\n else:\n noise_flag = False\n p_noise = args.pnoise\n if args.bootstrap == 1:\n bootstrap_flag = True\n else:\n bootstrap_flag = False\n t_bootstrap = args.tbootstrap\n r_bootstrap = args.rbootstrap\n amse_border = args.amseborder\n if args.diagplots == 1:\n diagnostic_plots_flag = True\n else:\n diagnostic_plots_flag = False\n\n if args.verbose == 1:\n verbose = True\n else:\n verbose = False\n\n if args.delimiter == \"whitespace\":\n delimiter = \" \"\n elif args.delimiter == \"tab\":\n delimiter = \"\\t\"\n elif args.delimiter == \"comma\":\n delimiter = \",\"\n elif args.delimiter == \"semicolon\":\n delimiter = \";\"\n\n # check for number of entries\n N = 0\n with open(args.sequence_file_path, \"r\") as f:\n for line in f:\n degree, count = line.strip().split(delimiter)\n N += int(count)\n print(\"========== Tail Index Estimation ==========\")\n print(\"Number of data entries: %i\" % N)\n ordered_data = np.zeros(N)\n current_index = 0 \n with open(args.sequence_file_path, \"r\") as f:\n for line in f:\n 
degree, count = line.strip().split(delimiter)\n ordered_data[current_index:current_index + int(count)] = float(degree)\n current_index += int(count)\n\n #enforce minimization boundary to the order statistics larger than border value\n eps_stop = 1 - float(len(ordered_data[np.where(ordered_data <= amse_border)]))\\\n /len(ordered_data)\n print(\"========================\")\n print(\"Selected AMSE border value: %0.4f\"%amse_border) \n print(\"Selected fraction of order statistics boundary for AMSE minimization: %0.4f\"%eps_stop)\n print(\"========================\")\n make_plots(ordered_data, args.output_file_path, number_of_bins,\n r_smooth, alpha, hsteps, bootstrap_flag, t_bootstrap,\n r_bootstrap, diagnostic_plots_flag, eps_stop, \n args.theta1, args.theta2, verbose, noise_flag,\n p_noise, args.savedata)\n\n\nif __name__ == '__main__':\n \n t1 = time.time()\n main()\n t2 = time.time()\n print(\"Elapsed time (total):\", t2-t1)\n\n"
] | [
[
"numpy.random.choice",
"numpy.where",
"numpy.cumsum",
"numpy.logspace",
"numpy.histogram",
"numpy.log",
"matplotlib.pyplot.subplots",
"numpy.nanmin",
"numpy.arange",
"numpy.sqrt",
"numpy.log10",
"numpy.array",
"numpy.zeros",
"numpy.round",
"numpy.diff",
"numpy.nanargmin",
"numpy.floor",
"numpy.abs",
"numpy.linspace",
"numpy.unique"
]
] |
Art-Ev/AequilibraE-GUI | [
"0c6ea37dcb5079cca499a4e17f0f96586c887be7"
] | [
"common_tools/numpy_model.py"
] | [
"\"\"\"\n -----------------------------------------------------------------------------------------------------------\n Package: AequilibraE\n\n Name: NumPy Model\n Purpose: Loads numpy to a GUI in an efficient fashion\n\n Original Author: Pedro Camargo ([email protected])\n Contributors:\n Last edited by: Pedro Camargo\n\n Website: www.AequilibraE.com1\n Repository: https://github.com/AequilibraE/AequilibraE\n\n Created: 2014-03-19\n Updated: 2017-10-02\n Copyright: (c) AequilibraE authors\n Licence: See LICENSE.TXT\n -----------------------------------------------------------------------------------------------------------\n \"\"\"\nimport numpy as np\nfrom qgis.PyQt import QtWidgets, uic, QtCore\nfrom qgis.PyQt.QtCore import *\nimport qgis\n\nQt = QtCore.Qt\n\n\n# This class was adapted from https://www.mail-archive.com/[email protected]/msg17575.html\n# Provided by David Douard\n\n# adaptations for headers come from: http://stackoverflow.com/questions/14135543/how-to-set-the-qtableview-header-name-in-pyqt4\n\n\nclass NumpyModel(QtCore.QAbstractTableModel):\n def __init__(self, aeq_matrix, separator, decimals, parent=None):\n QtCore.QAbstractTableModel.__init__(self, parent)\n self._array = aeq_matrix\n self.separator = separator\n self.decimals = decimals\n if self.separator:\n self.row_headers_data = [\"{:,}\".format(x) for x in aeq_matrix.index[:]]\n self.header_data = [\"{:,}\".format(x) for x in aeq_matrix.index[:]]\n else:\n self.row_headers_data = [str(x) for x in aeq_matrix.index[:]]\n self.header_data = [str(x) for x in aeq_matrix.index[:]]\n\n if np.issubdtype(aeq_matrix.dtype, np.integer):\n self.empties = np.iinfo(aeq_matrix.dtype).min\n self.decimals = 0\n\n def rowCount(self, parent=None):\n if self._array.matrix_view is None:\n return 0\n else:\n return self._array.matrix_view.shape[0]\n\n def columnCount(self, parent=None):\n if self._array.matrix_view is None:\n return 0\n else:\n return self._array.matrix_view.shape[1]\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid():\n if role == Qt.DisplayRole:\n row = index.row()\n col = index.column()\n\n separator = \"\"\n if self.separator:\n separator = \",\"\n\n if np.issubdtype(self._array.dtype, np.integer):\n if self._array.matrix_view[row, col] == self.empties:\n return \"\"\n else:\n return (\"{:\" + separator + \".\" + str(self.decimals) + \"f}\").format(\n self._array.matrix_view[row, col]\n )\n else:\n if np.isnan(self._array.matrix_view[row, col]):\n return \"\"\n else:\n return (\"{:\" + separator + \".\" + str(self.decimals) + \"f}\").format(\n self._array.matrix_view[row, col]\n )\n\n def headerData(self, col, orientation, role=Qt.DisplayRole):\n if role == Qt.DisplayRole and orientation == Qt.Horizontal:\n return self.header_data[col]\n if role == Qt.DisplayRole and orientation != Qt.Horizontal:\n return self.row_headers_data[col]\n\n return QtCore.QAbstractTableModel.headerData(self, col, orientation, role)\n"
] | [
[
"numpy.issubdtype",
"numpy.iinfo",
"numpy.isnan"
]
] |
remmyzen/nqs-tensorflow2 | [
"2af5d5ebb108eac4d2daa5082bdef11c8107bd1b"
] | [
"model/mlp/realpos/mlp_realpos.py"
] | [
"from model.mlp import MLP\nimport tensorflow as tf\nimport copy\nfrom functools import partial\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\nclass MLPRealPos(MLP):\n \"\"\"\n This class is used to define a multilayer perceptron with real and \n positive wavefunction. \n\n \\Psi(x) = \\sigma_N(...\\sigma_2(\\sigma_1(xW_1 + b_1)W_2 + b_2)....)\n\n where \\sigma_i is the activation function of layer i, W_i is the weights, and\n b is the biases.\n\n For Cai and Liu paper (PhysRevB.97.035116):\n activation_output = sigmoid\n activation_hidden = tanh\n \"\"\"\n\n def __init__(self, num_visible, num_hidden=[256], activation_hidden='relu', activation_output=None, num_expe=None, use_bias=True, freeze_layer=[], freeze_pos=None):\n \"\"\"\n Construct an multilayer perceptron model for real positive wavefunction.\n \n Args:\n num_visible: number of input nodes in the input layer.\n num_hidden: number of hidden nodes in the hidden layer represented in an array.\n activation_hidden: the activation in the hidden layer.\n activation_output: the activation in the output layer.\n num_expe: number of experiment to determine the seed.\n use_bias: whether to use bias or not.\n freeze_layer: a list to freeze the weights or the biases.\n where the index 0 and 1 refers to the weights and biases\n from input layer to the first hidden layer, respectively,\n and so on.\n \"\"\"\n \n MLP.__init__(self, num_visible, num_hidden)\n self.activation_hidden = activation_hidden\n self.activation_output = activation_output\n self.use_bias = use_bias\n self.freeze_layer = freeze_layer\n self.num_expe = num_expe\n\n self.freeze_pos = freeze_pos\n\n ## Set the same seed\n if num_expe is not None:\n np.random.seed(num_expe)\n tf.random.set_seed(num_expe)\n\n self.build_model()\n\n def build_model(self):\n \"\"\"\n Create the model with Keras\n \"\"\"\n inputs = tf.keras.layers.Input(shape=(self.num_visible,)) \n for ii in range(self.num_layer):\n if ii == 0:\n hidden = tf.keras.layers.Dense(self.num_hidden[ii], activation=self.activation_hidden, use_bias=self.use_bias)(inputs)\n # , kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01))(inputs)\n # , kernel_initializer=tf.keras.initializers.RandomNormal(stddev=5.))(inputs)\n else:\n hidden = tf.keras.layers.Dense(self.num_hidden[ii], activation=self.activation_hidden, use_bias=self.use_bias)(hidden)\n #, kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01))(hidden)\n # , kernel_initializer=tf.keras.initializers.RandomNormal(stddev=5.))(hidden)\n \n \n\n outputs = tf.keras.layers.Dense(1, activation=self.activation_output, use_bias=self.use_bias)(hidden)\n self.model = tf.keras.models.Model(inputs = inputs, outputs = outputs) \n\n def log_val(self, x):\n \"\"\"\n Calculate log(\\Psi(x))\n Args:\n x: the x\n \"\"\"\n return tf.math.log(self.model(x, training=True))\n\n def log_val_diff(self, xprime, x):\n \"\"\"\n Calculate log(\\Psi(x')) - log(\\Psi(x))\n Args:\n xprime: x'\n x: x\n \"\"\"\n log_val_1 = self.log_val(xprime)\n log_val_2 = self.log_val(x)\n return log_val_1 - log_val_2\n\n def derlog(self, x):\n \"\"\"\n Calculate $D_{W}(x) = D_{W} = (1 / \\Psi(x)) * (d \\Psi(x) / dW) = d \\log \\Psi(x) / dW$ where W can be the weights or the biases.\n \"\"\"\n with tf.GradientTape() as tape:\n output = tf.exp(self.log_val(x))\n #output = self.log_val(x)\n \n gradients = tape.jacobian(output, self.model.trainable_weights)\n\n #gradients_new = [tf.squeeze(grad, 1) for grad in gradients]\n \n 
gradients_new = []\n for ii, grad in enumerate(gradients):\n if ii in self.freeze_layer:\n grad = grad * 0.\n\n ## reshape so it can be divided by output\n grad = tf.reshape(grad, (grad.shape[0], *grad.shape[2:]))\n\n old_shape = grad.shape\n grad = tf.reshape(grad, (grad.shape[0], -1)) / output\n grad = tf.reshape(grad, old_shape)\n \n if ii == 0 and self.freeze_pos is not None:\n grad = grad.numpy()\n grad[self.freeze_pos] = 0.\n grad = tf.convert_to_tensor(grad)\n \n \n gradients_new.append(grad)\n \n return gradients_new\n\n def get_parameters(self):\n \"\"\"\n Get the parameters for this model\n \"\"\"\n if self.model is None:\n return self.params\n else:\n return self.model.get_weights()\n\n def set_parameters(self, params):\n \"\"\"\n Set the parameters for this model for transfer learning or loading model purposes\n Args:\n params: the parameters to be set.\n \"\"\"\n self.model.set_weights(params)\n\n def param_difference (self, first_param, last_param):\n \"\"\"\n Calculate the difference between two parameters.\n This is equals to the sum of the mean squared difference of all parameters (weights and biases)\n \"\"\" \n sum_diff = 0.\n for (par1, par2) in zip(first_param[1], last_param[1]):\n sum_diff += np.mean((par1 - par2) ** 2)\n\n return sum_diff\n \n def visualize_param (self, params, path):\n \"\"\"\n Visualize every parameters\n Args:\n params: the parameters that visualize\n path: the path to save the visualization\n \"\"\"\n epoch = params[0]\n for ii, param in enumerate(params[1]):\n ## Reshape for bias\n if len(param.shape) == 1:\n param = np.reshape(param, (param.shape[0],1))\n\n plt.figure()\n if ii % 2 == 0:\n plt.title(\"Weight layer %d at epoch %d\" % (ii + 1, epoch))\n else:\n plt.title(\"Bias layer %d at epoch %d\" % (ii + 1, epoch))\n plt.imshow(param, cmap='hot', interpolation='nearest')\n plt.xticks(np.arange(0, param.shape[1], 1.0))\n plt.yticks(np.arange(0, param.shape[0], 1.0))\n plt.colorbar()\n plt.tight_layout()\n if ii % 2 == 0:\n plt.savefig(path + '/weight-layer-%d-%d.png' % (ii+1, epoch))\n else:\n plt.savefig(path + '/bias-layer-%d-%d.png' % (ii+1, epoch))\n plt.close()\n\n def get_name(self):\n \"\"\"\n Get the name of the model\n \"\"\"\n hidden_layer_str = '-'.join([str(hid) for hid in self.num_hidden])\n return 'mlprealpos-%s' % (hidden_layer_str)\n \n def make_pickle_object(self):\n \"\"\"\n Tensorflow object cannot be pickled so needs to be handled\n save the last param first and make it none\n \"\"\"\n self.params = self.get_parameters()\n self.model = None \n self.activation_hidden = None\n self.activation_output = None\n\n def __str__(self):\n return 'MLPRealPositive %s' % (self.num_hidden)\n\n def to_xml(self):\n stri = \"\"\n stri += \"<model>\\n\"\n stri += \"\\t<type>MLPRealPositive</type>\\n\"\n stri += \"\\t<params>\\n\"\n stri += \"\\t\\t<num_visible>%d</num_visible>\\n\" % self.num_visible\n stri += \"\\t\\t<num_hidden>%s</num_hidden>\\n\" % self.num_hidden\n stri += \"\\t\\t<activation_output>%s</activation_output>\\n\" % str(self.activation_output)\n stri += \"\\t\\t<activation_hidden>%s</activation_hidden>\\n\" % str(self.activation_hidden)\n stri += \"\\t\\t<use_bias>%s</use_bias>\\n\" % str(self.use_bias)\n stri += \"\\t\\t<num_expe>%s</num_expe>\\n\" % str(self.num_expe)\n stri += \"\\t\\t<freeze_layer>%s</freeze_layer>\\n\" % str(self.freeze_layer)\n stri += \"\\t</params>\\n\"\n stri += \"</model>\\n\"\n return stri\n"
] | [
[
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"numpy.mean",
"matplotlib.pyplot.colorbar",
"tensorflow.GradientTape",
"tensorflow.random.set_seed",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.use",
"numpy.reshape",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.Input",
"numpy.random.seed",
"matplotlib.pyplot.imshow"
]
] |
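A minimal, standalone sketch of the Keras functional-API pattern that `build_model`/`log_val` in the `mlp_realpos.py` file above rely on, using the Cai & Liu activations mentioned in its docstring (tanh hidden, sigmoid output) so that `tf.math.log` of the network output is well defined. The system size, batch size, and ±1 input encoding here are illustrative assumptions, not values fixed by the file:

```python
# Sketch only: sizes, batch, and the +/-1 spin encoding are assumptions.
import numpy as np
import tensorflow as tf

num_visible, num_hidden = 8, [256]                  # [256] is the file's default
inputs = tf.keras.layers.Input(shape=(num_visible,))
hidden = inputs
for units in num_hidden:
    hidden = tf.keras.layers.Dense(units, activation="tanh")(hidden)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(hidden)  # Cai & Liu choice
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)

x = np.random.choice([-1.0, 1.0], size=(4, num_visible)).astype(np.float32)
log_psi = tf.math.log(model(x, training=True))      # log Psi(x), as in log_val()
print(log_psi.shape)                                # (4, 1)
```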
v-goncharenko/moabb | [
"c652c77d06209d8583b653dbb8bf6282c6f37eb4"
] | [
"moabb/pipelines/features.py"
] | [
"import numpy as np\nimport scipy.signal as signal\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass LogVariance(BaseEstimator, TransformerMixin):\n def fit(self, X, y):\n \"\"\"fit.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"transform\"\"\"\n assert X.ndim == 3\n return np.log(np.var(X, -1))\n\n\nclass FM(BaseEstimator, TransformerMixin):\n def __init__(self, freq=128):\n \"\"\"instantaneous frequencies require a sampling frequency to be properly\n scaled,\n which is helpful for some algorithms. This assumes 128 if not told\n otherwise.\n\n \"\"\"\n self.freq = freq\n\n def fit(self, X, y):\n \"\"\"fit.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"transform. \"\"\"\n xphase = np.unwrap(np.angle(signal.hilbert(X, axis=-1)))\n return np.median(self.freq * np.diff(xphase, axis=-1) / (2 * np.pi), axis=-1)\n\n\nclass ExtendedSSVEPSignal(BaseEstimator, TransformerMixin):\n \"\"\"Prepare FilterBank SSVEP EEG signal for estimating extended covariances\n\n Riemannian approaches on SSVEP rely on extended covariances matrices, where\n the filtered signals are contenated to estimate a large covariance matrice.\n\n FilterBank SSVEP EEG are of shape (n_trials, n_channels, n_times, n_freqs)\n and should be convert in (n_trials, n_channels*n_freqs, n_times) to\n estimate covariance matrices of (n_channels*n_freqs, n_channels*n_freqs).\n \"\"\"\n\n def __init__(self):\n \"\"\"Empty init for ExtendedSSVEPSignal\"\"\"\n pass\n\n def fit(self, X, y):\n \"\"\"No need to fit for ExtendedSSVEPSignal\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Transpose and reshape EEG for extended covmat estimation\"\"\"\n out = X.transpose((0, 3, 1, 2))\n n_trials, n_freqs, n_channels, n_times = out.shape\n out = out.reshape((n_trials, n_channels * n_freqs, n_times))\n return out\n"
] | [
[
"numpy.diff",
"numpy.var",
"scipy.signal.hilbert"
]
] |
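The three transformers in `moabb/pipelines/features.py` above follow the scikit-learn fit/transform convention, so they can be exercised directly on random arrays. A minimal usage sketch; the import path mirrors the file path shown, the `(n_trials, n_channels, n_times)` layout follows the `X.ndim == 3` assertion in `LogVariance`, and the FilterBank shape follows the `ExtendedSSVEPSignal` docstring:

```python
# Sketch only: assumes moabb is installed and importable at this path.
import numpy as np
from moabb.pipelines.features import FM, ExtendedSSVEPSignal, LogVariance

X = np.random.randn(10, 8, 256)                  # (n_trials, n_channels, n_times)
feats = LogVariance().fit(X, None).transform(X)  # -> (10, 8) log-variance features
inst_freq = FM(freq=128).transform(X)            # -> (10, 8) median inst. frequency

Xfb = np.random.randn(10, 8, 256, 3)             # FilterBank SSVEP, n_freqs = 3
Xext = ExtendedSSVEPSignal().transform(Xfb)      # -> (10, 24, 256)
print(feats.shape, inst_freq.shape, Xext.shape)
```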
samesense/mahdi_epi | [
"ec002df1d6b0dbdd4be8675e48971ed604ee9014"
] | [
"src/scripts/limit_exac_genes.py"
] | [
"import pandas, sys\ndat_file, vcf_file, out_file = sys.argv[1:]\ndf_pre = pandas.read_excel(dat_file)\ngenes = set(df_pre['Gene Symbol'].values)\n\nwith open(vcf_file) as f, open(out_file, 'w') as fout:\n for line in f:\n if line[0] == '#':\n print(line.strip(), file=fout)\n else:\n sp = line.strip().split('\\t')\n effs = sp[-3].split('EFF=')[1].split(';')[0].split(',')\n for eff in effs:\n # downstream_gene_variant(MODIFIER||958|||WASH7P||NON_CODING|NR_024540.1||1)\n# print(eff)\n gene = eff.split('|')[-6]\n if gene in genes:\n print(line.strip(), file=fout)\n"
] | [
[
"pandas.read_excel"
]
] |
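In `limit_exac_genes.py` above, the gene name is pulled out of the SnpEff `EFF=` annotation purely by position (`eff.split('|')[-6]`), which is easy to misread. A small sketch of just that parsing step, reusing the example annotation quoted in the script's own comment; the surrounding VCF line is fabricated and only needs the INFO field to land in `sp[-3]`, as the script assumes:

```python
# Sketch only: the VCF line is fabricated around the EFF example quoted in the script.
line = ("chr1\t100\t.\tA\tG\t.\t.\t"
        "EFF=downstream_gene_variant(MODIFIER||958|||WASH7P||NON_CODING|NR_024540.1||1)"
        "\tGT\t0/1")
sp = line.strip().split("\t")
effs = sp[-3].split("EFF=")[1].split(";")[0].split(",")
genes_hit = {eff.split("|")[-6] for eff in effs}   # field -6 holds the gene symbol
print(genes_hit)                                   # {'WASH7P'}
```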
qyz55/M2PG | [
"797822e54cc627ebeeb908b239c80e94cd279dcd"
] | [
"src/modules/mixers/hqmix_noabs.py"
] | [
"import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass HQMixerFF(nn.Module):\n def __init__(self, args):\n super(HQMixerFF, self).__init__()\n self.args = args\n self.n_agents = args.n_agents\n activation_func=nn.LeakyReLU()\n self.latent_dim = args.latent_dim\n self.state_dim = int(np.prod(args.state_shape))\n NN_HIDDEN_SIZE = self.state_dim\n self.embed_dim = args.central_mixing_embed_dim\n self.mid_dim = args.mid_dim\n if args.concat_ori_s:\n HIDDEN_ALL = self.state_dim + NN_HIDDEN_SIZE + self.n_agents\n else:\n HIDDEN_ALL = NN_HIDDEN_SIZE + self.n_agents\n self.net = nn.Sequential(nn.Linear(HIDDEN_ALL, self.embed_dim),\n nn.ReLU(),\n nn.Linear(self.embed_dim, self.embed_dim),\n nn.ReLU(),\n nn.Linear(self.embed_dim, self.embed_dim),\n nn.ReLU(),\n nn.Linear(self.embed_dim, 1))\n self.V = nn.Sequential(nn.Linear(HIDDEN_ALL - self.n_agents, self.embed_dim),\n nn.ReLU(),\n nn.Linear(self.embed_dim, 1))\n\n self.latent_net = nn.Sequential(nn.Linear(args.latent_dim, NN_HIDDEN_SIZE),\n nn.BatchNorm1d(NN_HIDDEN_SIZE),\n activation_func)\n\n #(bs,t,n),\n def forward(self, agent_qs, states, skill): #skill:(bs,t,latent_dim) state:(bs,t,all_obs)\n bs = agent_qs.size(0)\n r_s = skill.reshape(-1, self.latent_dim)#(bs,t,latent_dim)\n r_s = self.latent_net(r_s) #(bs*t, NN_HIDDEN_SIZE)\n states = states.reshape(-1, self.state_dim) #(bs*t, all_obs)\n agent_qs = agent_qs.reshape(-1, self.n_agents) #(bs*t, n)\n # First layer\n if self.args.concat_ori_s:\n input = th.cat([states, r_s, agent_qs], dim=1)\n else:\n input = th.cat([r_s, agent_qs], dim=1)\n advs = self.net(input)\n # State-dependent bias\n vs = self.V(th.cat([r_s, states], dim=1))\n y = advs + vs\n # Reshape and return\n q_tot = y.view(bs, -1, 1)\n return q_tot #(bs,t,1)\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"numpy.prod",
"torch.nn.BatchNorm1d"
]
] |
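A usage sketch for the `HQMixerFF` mixer above, mainly to make the tensor shapes from its inline comments explicit: `agent_qs` is `(bs, t, n_agents)`, `states` is `(bs, t, state_dim)`, `skill` is `(bs, t, latent_dim)`, and the output is `(bs, t, 1)`. The import path and every size placed in `args` are illustrative assumptions; the repo's real `args` object carries many more fields:

```python
# Sketch only: import path and all sizes are illustrative assumptions.
from types import SimpleNamespace
import torch as th
from src.modules.mixers.hqmix_noabs import HQMixerFF

args = SimpleNamespace(n_agents=3, latent_dim=6, state_shape=(12,),
                       central_mixing_embed_dim=32, mid_dim=16, concat_ori_s=True)
mixer = HQMixerFF(args)

bs, t = 4, 5
agent_qs = th.randn(bs, t, args.n_agents)    # per-agent Q-values
states   = th.randn(bs, t, 12)               # flattened global state
skill    = th.randn(bs, t, args.latent_dim)  # latent skill code
q_tot = mixer(agent_qs, states, skill)
print(q_tot.shape)                           # torch.Size([4, 5, 1])
```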
TokyAxel/nevergrad | [
"063909c3f70d6b12c097c9146243287c6ea5fa1d"
] | [
"nevergrad/parametrization/core.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport uuid\nimport warnings\nimport numpy as np\nimport nevergrad.common.typing as tp\nfrom nevergrad.common import errors\nfrom . import utils\nfrom ._layering import ValueProperty as ValueProperty\nfrom ._layering import Layered as Layered\nfrom ._layering import Level as Level\n\n\n# pylint: disable=no-value-for-parameter,pointless-statement,import-outside-toplevel\n\n\nP = tp.TypeVar(\"P\", bound=\"Parameter\")\n\n\n# pylint: disable=too-many-public-methods\nclass Parameter(Layered):\n \"\"\"Class providing the core functionality of a parameter, aka\n value, internal/model parameters, mutation, recombination\n and additional features such as shared random state,\n constraint check, hashes, generation and naming.\n The value field should sent to the function to optimize.\n\n Example\n -------\n >>> ng.p.Array(shape=(2,)).value\n array([0., 0.])\n \"\"\"\n\n # By default, all Parameter attributes of this Parameter are considered as\n # sub-parameters.\n # Spawning a child creates a shallow copy.\n\n _LAYER_LEVEL = Level.ROOT\n value: ValueProperty[tp.Any, tp.Any] = ValueProperty()\n\n def __init__(self) -> None:\n # Main features\n super().__init__()\n self._subobjects = utils.Subobjects(\n self, base=Parameter, attribute=\"__dict__\"\n ) # registers and apply functions too all (sub-)Parameter attributes\n self.parents_uids: tp.List[str] = []\n self.heritage: tp.Dict[tp.Hashable, tp.Any] = {\"lineage\": self.uid} # passed through to children\n self.loss: tp.Optional[float] = None # associated loss\n self._losses: tp.Optional[np.ndarray] = None # associated losses (multiobjective) as an array\n self._dimension: tp.Optional[int] = None\n # Additional convenient features\n self._random_state: tp.Optional[np.random.RandomState] = None # lazy initialization\n self._generation = 0\n self._constraint_checkers: tp.List[tp.Callable[[tp.Any], tp.Union[bool, float]]] = []\n self._name: tp.Optional[str] = None\n self._frozen = False\n self._meta: tp.Dict[tp.Hashable, tp.Any] = {} # for anything algorithm related\n self.function = utils.FunctionInfo()\n\n @property\n def descriptors(self) -> utils.DeprecatedDescriptors: # TODO remove\n return utils.DeprecatedDescriptors(self)\n\n @property\n def losses(self) -> np.ndarray:\n \"\"\"Possibly multiobjective losses which were told\n to the optimizer along this parameter.\n In case of mono-objective loss, losses is the array containing this loss as sole element\n\n Note\n ----\n This API is highly experimental\n \"\"\"\n if self._losses is not None:\n return self._losses\n if self.loss is not None:\n return np.array([self.loss], dtype=float)\n raise RuntimeError(\"No loss was provided\")\n\n @property\n def args(self) -> tp.Tuple[tp.Any, ...]:\n \"\"\"Value of the positional arguments.\n Used to input value in a function as `func(*param.args, **param.kwargs)`\n Use `parameter.Instrumentation` to set `args` and `kwargs` with full freedom.\n \"\"\"\n return (self.value,)\n\n @property\n def kwargs(self) -> tp.Dict[str, tp.Any]:\n \"\"\"Value of the keyword arguments.\n Used to input value in a function as `func(*param.args, **param.kwargs)`\n Use `parameter.Instrumentation` to set `args` and `kwargs` with full freedom.\n \"\"\"\n return {}\n\n @property\n def dimension(self) -> int:\n \"\"\"Dimension of the standardized space for this parameter\n i.e size 
of the vector returned by get_standardized_data(reference=...)\n \"\"\"\n if self._dimension is None:\n try:\n self._dimension = self.get_standardized_data(reference=self).size\n except errors.UnsupportedParameterOperationError:\n self._dimension = 0\n return self._dimension\n\n def mutate(self) -> None:\n \"\"\"Mutate parameters of the instance, and then its value\"\"\"\n self._check_frozen()\n self._subobjects.apply(\"mutate\")\n self._layers[-1]._layered_mutate()\n\n def _layered_mutate(self) -> None:\n self.set_standardized_data(self.random_state.normal(size=self.dimension))\n\n def sample(self: P) -> P:\n \"\"\"Sample a new instance of the parameter.\n This usually means spawning a child and mutating it.\n This function should be used in optimizers when creating an initial population,\n and parameter.heritage[\"lineage\"] is reset to parameter.uid instead of its parent's\n \"\"\"\n # inner working can be overrided by _layer_sample()\n self.random_state # make sure to populate it before copy\n child = self._layers[-1]._layered_sample()\n if not isinstance(child, Parameter) and not isinstance(child, type(self)):\n raise errors.NevergradRuntimeError(\"Unexpected sample return type\")\n child._set_parenthood(None)\n return child # type: ignore\n\n def recombine(self: P, *others: P) -> None:\n \"\"\"Update value and parameters of this instance by combining it with\n other instances.\n\n Parameters\n ----------\n *others: Parameter\n other instances of the same type than this instance.\n \"\"\"\n if not others:\n return\n self._check_frozen()\n assert all(isinstance(o, self.__class__) for o in others)\n self._subobjects.apply(\"recombine\", *others)\n self._layers[-1]._layered_recombine(*others)\n\n def get_standardized_data(self: P, *, reference: P) -> np.ndarray:\n \"\"\"Get the standardized data representing the value of the instance as an array in the optimization space.\n In this standardized space, a mutation is typically centered and reduced (sigma=1) Gaussian noise.\n The data only represent the value of this instance, not the parameters (eg.: mutable sigma), hence it does not\n fully represent the state of the instance. Also, in stochastic cases, the value can be non-deterministically\n deduced from the data (eg.: categorical variable, for which data includes sampling weights for each value)\n\n Parameters\n ----------\n reference: Parameter\n the reference instance for representation in the standardized data space. 
This keyword parameter is\n mandatory to make the code clearer.\n If you use \"self\", this method will always return a zero vector.\n\n Returns\n -------\n np.ndarray\n the representation of the value in the optimization space\n\n Note\n ----\n - Operations between different standardized data should only be performed if each array was produced\n by the same reference in the exact same state (no mutation)\n - to make the code more explicit, the \"reference\" parameter is enforced as a keyword-only parameter.\n \"\"\"\n assert reference is None or isinstance(\n reference, self.__class__\n ), f\"Expected {type(self)} but got {type(reference)} as reference\"\n return self._internal_get_standardized_data(self if reference is None else reference)\n\n def _internal_get_standardized_data(self: P, reference: P) -> np.ndarray:\n raise errors.UnsupportedParameterOperationError(\n f\"Export to standardized data space is not implemented for {self.name}\"\n )\n\n def set_standardized_data(self: P, data: tp.ArrayLike, *, reference: tp.Optional[P] = None) -> P:\n \"\"\"Updates the value of the provided reference (or self) using the standardized data.\n\n Parameters\n ----------\n np.ndarray\n the representation of the value in the optimization space\n reference: Parameter\n the reference point for representing the data (\"self\", if not provided)\n\n Returns\n -------\n Parameter\n self (modified)\n\n Note\n ----\n To make the code more explicit, the \"reference\" is enforced\n as keyword-only parameters.\n \"\"\"\n sent_reference = self if reference is None else reference\n assert isinstance(\n sent_reference, self.__class__\n ), f\"Expected {type(self)} but got {type(sent_reference)} as reference\"\n self._check_frozen()\n del self.value # remove all cached information\n self._internal_set_standardized_data(np.array(data, copy=False), reference=sent_reference)\n return self\n\n def _internal_set_standardized_data( # pylint: disable=unused-argument\n self: P, data: np.ndarray, reference: P\n ) -> None:\n if data.size:\n raise errors.UnsupportedParameterOperationError(\n f\"Import from standardized data space is not implemented for {self.name}\"\n )\n\n # PART 2 - Additional features\n\n @property\n def generation(self) -> int:\n \"\"\"Generation of the parameter (children are current generation + 1)\"\"\"\n return self._generation\n\n def get_value_hash(self) -> tp.Hashable:\n \"\"\"Hashable object representing the current value of the instance\"\"\"\n val = self.value\n if isinstance(val, (str, bytes, float, int)):\n return val\n elif isinstance(val, np.ndarray):\n return val.tobytes()\n else:\n raise errors.UnsupportedParameterOperationError(\n f\"Value hash is not supported for object {self.name}\"\n )\n\n def __repr__(self) -> str:\n strings = [self.name]\n if not callable(self.value): # not a mutation\n strings.append(str(self.value))\n return \":\".join(strings)\n\n # %% Constraint management\n def satisfies_constraints(self) -> bool:\n \"\"\"Whether the instance satisfies the constraints added through\n the `register_cheap_constraint` method\n\n Returns\n -------\n bool\n True iff the constraint is satisfied\n \"\"\"\n inside = self._subobjects.apply(\"satisfies_constraints\")\n if not all(inside.values()):\n return False\n if not self._constraint_checkers:\n return True\n val = self.value\n return all(utils.float_penalty(func(val)) <= 0 for func in self._constraint_checkers)\n\n def register_cheap_constraint(\n self,\n func: tp.Union[tp.Callable[[tp.Any], bool], tp.Callable[[tp.Any], float]],\n 
as_layer: bool = False,\n ) -> None:\n \"\"\"Registers a new constraint on the parameter values.\n\n Parameters\n ----------\n func: Callable\n function which, given the value of the instance, returns whether it satisfies the constraints (if output = bool),\n or a float which is >= 0 if the constraint is satisfied.\n\n Note\n ----\n - this is only for checking after mutation/recombination/etc if the value still satisfy the constraints.\n The constraint is not used in those processes.\n - constraints should be fast to compute.\n - this function has an additional \"as_layer\" parameter which is experimental for now, and can have unexpected\n behavior\n \"\"\"\n if getattr(func, \"__name__\", \"not lambda\") == \"<lambda>\": # LambdaType does not work :(\n warnings.warn(\"Lambda as constraint is not advised because it may not be picklable.\")\n if not as_layer:\n self._constraint_checkers.append(func)\n else:\n from nevergrad.ops.constraints import Constraint\n import nevergrad as ng\n\n compat_func = (\n func\n if not isinstance(self, ng.p.Instrumentation)\n else utils._ConstraintCompatibilityFunction(func)\n )\n self.add_layer(Constraint(compat_func)) # type: ignore\n\n # %% random state\n\n @property\n def random_state(self) -> np.random.RandomState:\n \"\"\"Random state the instrumentation and the optimizers pull from.\n It can be seeded/replaced.\n \"\"\"\n if self._random_state is None:\n # use the setter, to make sure the random state is propagated to the variables\n seed = np.random.randint(2 ** 32, dtype=np.uint32) # better way?\n self._set_random_state(np.random.RandomState(seed))\n assert self._random_state is not None\n return self._random_state\n\n @random_state.setter\n def random_state(self, random_state: np.random.RandomState) -> None:\n self._set_random_state(random_state)\n\n def _set_random_state(self, random_state: np.random.RandomState) -> None:\n self._random_state = random_state\n self._subobjects.apply(\"_set_random_state\", random_state)\n\n def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P:\n \"\"\"Creates a new instance which shares the same random generator than its parent,\n is sampled from the same data, and mutates independently from the parentp.\n If a new value is provided, it will be set to the new instance\n\n Parameters\n ----------\n new_value: anything (optional)\n if provided, it will update the new instance value (cannot be used at the same time as new_data).\n\n Returns\n -------\n Parameter\n a new instance of the same class, with same content/internal-model parameters/...\n Optionally, a new value will be set after creation\n \"\"\"\n # make sure to initialize the random state before spawning children\n self.random_state # pylint: disable=pointless-statement\n child = self.copy()\n child._set_parenthood(self)\n if new_value is not None:\n child.value = new_value\n return child\n\n def copy(self: P) -> P:\n \"\"\"Creates a full copy of the parameter (with new unique uid).\n Use spawn_child instead to make sure to add the parenthood information.\n \"\"\"\n child = super().copy()\n child.uid = uuid.uuid4().hex\n child._frozen = False\n child._subobjects = self._subobjects.new(child)\n child._meta = {}\n child.parents_uids = list(self.parents_uids)\n child.heritage = dict(self.heritage)\n child.loss = None\n child._losses = None\n child._constraint_checkers = list(self._constraint_checkers)\n # layers\n if self is not self._layers[0]:\n raise errors.NevergradRuntimeError(\"Something has gone horribly wrong with the layers\")\n # 
subparameters\n attribute = self._subobjects.attribute\n container = getattr(child, attribute)\n if attribute != \"__dict__\": # make a copy of the container if different from __dict__\n container = dict(container) if isinstance(container, dict) else list(container)\n setattr(child, attribute, container)\n for key, val in self._subobjects.items():\n container[key] = val.copy()\n del child.value # clear cache\n return child\n\n def _set_parenthood(self, parent: tp.Optional[\"Parameter\"]) -> None:\n \"\"\"Sets the parenthood information to Parameter and subparameters.\"\"\"\n if parent is None:\n self._generation = 0\n self.heritage = dict(lineage=self.uid)\n self.parents_uids = []\n else:\n self._generation = parent.generation + 1\n self.parents_uids = [parent.uid]\n self._subobjects.apply(\"_set_parenthood\", parent)\n\n def freeze(self) -> None:\n \"\"\"Prevents the parameter from changing value again (through value, mutate etc...)\"\"\"\n self._frozen = True\n self._subobjects.apply(\"freeze\")\n\n def _check_frozen(self) -> None:\n if self._frozen and not isinstance(\n self, Constant\n ): # nevermind constants (since they dont spawn children)\n raise RuntimeError(\n f\"Cannot modify frozen Parameter {self.name}, please spawn a child and modify it instead\"\n \"(optimizers freeze the parametrization and all asked and told candidates to avoid border effects)\"\n )\n # make sure the random state is initialized if we need to update it (aka if not frozen)\n self.random_state # pylint: disable=pointless-statement\n self._subobjects.apply(\"_check_frozen\")\n\n\n# Basic types and helpers #\n\n\nclass Constant(Parameter):\n \"\"\"Parameter-like object for simplifying management of constant parameters:\n mutation/recombination do nothing, value cannot be changed, standardize data is an empty array,\n child is the same instance.\n\n Parameter\n ---------\n value: Any\n the value that this parameter will always provide\n \"\"\"\n\n def __init__(self, value: tp.Any) -> None:\n super().__init__()\n if isinstance(value, Parameter) and not isinstance(self, MultiobjectiveReference):\n raise TypeError(\"Only non-parameters can be wrapped in a Constant\")\n self._value = value\n\n def _get_name(self) -> str:\n return str(self._value)\n\n def get_value_hash(self) -> tp.Hashable:\n try:\n return super().get_value_hash()\n except errors.UnsupportedParameterOperationError:\n return \"#non-hashable-constant#\"\n\n def _layered_get_value(self) -> tp.Any:\n return self._value\n\n def _layered_set_value(self, value: tp.Any) -> None:\n different = False\n if isinstance(value, np.ndarray):\n if not np.equal(value, self._value).all():\n different = True\n elif not (value == self._value or value is self._value):\n different = True\n if different:\n raise ValueError(\n f'Constant value can only be updated to the same value (in this case \"{self._value}\")'\n )\n\n def _layered_sample(self: P) -> P:\n return self\n\n def get_standardized_data( # pylint: disable=unused-argument\n self: P, *, reference: tp.Optional[P] = None\n ) -> np.ndarray:\n return np.array([])\n\n def spawn_child(self: P, new_value: tp.Optional[tp.Any] = None) -> P:\n if new_value is not None:\n self.value = new_value # check that it is equal\n return self # no need to create another instance for a constant\n\n def recombine(self: P, *others: P) -> None:\n pass\n\n def mutate(self) -> None:\n pass\n\n\ndef as_parameter(param: tp.Any) -> Parameter:\n \"\"\"Returns a Parameter from anything:\n either the input if it is already a parameter, or a 
Constant if not\n This is convenient for iterating over Parameter and other objects alike\n \"\"\"\n if isinstance(param, Parameter):\n return param\n else:\n return Constant(param)\n\n\nclass MultiobjectiveReference(Constant):\n def __init__(self, parameter: tp.Optional[Parameter] = None) -> None:\n if parameter is not None and not isinstance(parameter, Parameter):\n raise TypeError(\n \"MultiobjectiveReference should either take no argument or a parameter which will \"\n f\"be used by the optimizer.\\n(received {parameter} of type {type(parameter)})\"\n )\n super().__init__(parameter)\n\n\nclass Operator(Layered):\n \"\"\"Layer object that can be used as an operator on a Parameter\"\"\"\n\n _LAYER_LEVEL = Level.OPERATION\n\n def __call__(self, parameter: Parameter) -> Parameter:\n \"\"\"Applies the operator on a Parameter to create a new Parameter\"\"\"\n new = parameter.copy()\n new.add_layer(self.copy())\n return new\n"
] | [
[
"numpy.equal",
"numpy.array",
"numpy.random.randint",
"numpy.random.RandomState"
]
] |
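The `Parameter` base class above underpins nevergrad's `ng.p.*` classes (its own docstring uses `ng.p.Array(shape=(2,))`). A short sketch exercising the methods defined in this file through that public class, assuming nevergrad is installed; the constraint function is an arbitrary example, not anything from the source:

```python
# Sketch only: assumes `pip install nevergrad`; the constraint is an arbitrary example.
import nevergrad as ng

param = ng.p.Array(shape=(2,))
param.random_state.seed(0)            # seed the lazily created shared random state

def above_minus_one(value):           # bool-valued check, as register_cheap_constraint allows
    return bool((value >= -1.0).all())

param.register_cheap_constraint(above_minus_one)

child = param.spawn_child()           # shares the random state, generation + 1
child.mutate()                        # base-class behaviour: Gaussian move in standardized space
data = child.get_standardized_data(reference=param)
print(child.value, data, child.satisfies_constraints(), child.generation)
```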
ZYVE255/ebm-optimizer | [
"9b1cf6014f987ef4b8d65d4a5659c704b6ea15c4"
] | [
"Bell_EBM/StarPlanetSystem.py"
] | [
"# Author: Taylor Bell\n# Last Update: 2019-02-15\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.constants as const\nimport scipy.integrate\nimport scipy.optimize as spopt\nimport warnings\nimport time\n\nfrom .Star import Star\nfrom .Planet import Planet\nfrom .KeplerOrbit import KeplerOrbit\nfrom . import H2_Dissociation_Routines as h2\n\n\nclass System(object):\n \"\"\"A Star+Planet System.\n\n Attributes:\n star (Bell_EBM.Star): The host star.\n planet (Bell_EBM.Planet): The planet.\n \n \"\"\"\n\n def __init__(self, star=None, planet=None, neq=False):\n \"\"\"Initialization function.\n \n Attributes:\n star (Bell_EBM.Star, optional): The host star.\n planet (Bell_EBM.Planet, optional): The planet.\n neq (bool, optional): Whether or not to use non-equilibrium ODE.\n\n \"\"\"\n \n if star is None:\n self.star = Star()\n else:\n self.star = star\n \n if planet is None:\n self.planet = Planet()\n else:\n self.planet = planet\n \n self.neq = neq\n if self.planet.plType == 'bell2018' and neq:\n self.ODE = self.ODE_NEQ\n else:\n self.ODE = self.ODE_EQ\n \n self.planet.orbit.m1 = self.star.mass\n \n def get_phase_periastron(self):\n \"\"\"Get the orbital phase of periastron.\n \n Returns:\n float: The orbital phase of periastron.\n \n \"\"\"\n \n return self.planet.orbit.phase_periastron\n \n \n def get_phase_transit(self):\n \"\"\"Get the orbital phase of transit.\n \n Returns:\n float: The orbital phase of transit.\n \n \"\"\"\n \n return 0.\n \n \n def get_phase_eclipse(self):\n \"\"\"Get the orbital phase of eclipse.\n \n Returns:\n float: The orbital phase of eclipse.\n \n \"\"\"\n \n return self.planet.orbit.phase_eclipse\n \n \n def get_phase(self, t):\n \"\"\"Get the orbital phase.\n \n Args:\n t (ndarray): The time in days.\n \n Returns:\n ndarray: The orbital phase.\n \n \"\"\"\n \n return self.planet.orbit.get_phase(t)\n \n def get_teq(self, t=0):\n \"\"\"Get the planet's equilibrium temperature.\n \n Args:\n t (ndarray, optional): The time in days.\n \n Returns:\n ndarray: The planet's equilibrium temperature at time(s) t.\n \n \"\"\"\n return 0.25**0.25*self.get_tirr(t)\n \n def get_tirr(self, t=0.):\n \"\"\"Get the planet's irradiation temperature.\n \n Args:\n t (ndarray, optional): The time in days.\n \n Returns:\n ndarray: The planet's irradiation temperature at time(s) t.\n \n \"\"\"\n \n if self.planet.orbit.e == 0:\n dist = self.planet.orbit.a*np.ones_like(t)\n else:\n dist = self.planet.orbit.distance(t)\n \n if type(t) == float or type(t) == int:\n dist = float(dist)\n \n return self.star.teff*np.sqrt(self.star.rad/dist)\n \n def Firr(self, t=0., TA=None, bolo=True, tStarBright=None, wav=4.5e-6):\n \"\"\"Calculate the instantaneous irradiation.\n \n Args:\n t (ndarray, optional): The time in days.\n TA (ndarray, optional): The true anomaly in radians.\n bolo (bool, optional): Determines whether computed flux is bolometric\n (True, default) or wavelength dependent (False).\n tStarBright (ndarray): The stellar brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n \n Returns:\n ndarray: The instantaneous irradiation.\n \n \"\"\"\n \n # Just grab semi-major axis for circular orbits to speed things up\n if self.planet.orbit.e == 0:\n dist = self.planet.orbit.a*np.ones_like(t)\n else:\n dist = self.planet.orbit.distance(t, TA)\n \n firr = self.planet.absorptivity*self.star.Fstar(bolo, tStarBright, wav)/(np.pi*dist**2)\n \n return firr\n\n def Fin(self, t=0, TA=None, bolo=True, tStarBright=None, 
wav=4.5e-6):\n \"\"\"Calculate the instantaneous incident flux.\n \n Args:\n t (ndarray, optional): The time in days.\n TA (ndarray, optional): The true anomaly in radians.\n bolo (bool, optional): Determines whether computed flux is bolometric\n (True, default) or wavelength dependent (False).\n tStarBright (ndarray): The stellar brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n \n Returns:\n ndarray: The instantaneous incident flux.\n \n \"\"\"\n \n return self.Firr(t, TA, bolo, tStarBright, wav)*self.planet.weight(t, TA)\n\n def lightcurve(self, t=None, T=None, bolo=True, tStarBright=None, wav=4.5e-6, allowReflect=True, allowThermal=True):\n \"\"\"Calculate the planet's lightcurve (ignoring any occultations).\n \n Args:\n t (ndarray, optional): The time in days. If None, will use 1000 time steps around orbit.\n T (ndarray, optional): The temperature map (either shape (1, self.planet.map.npix) and\n constant over time or shape is (t.shape, self.planet.map.npix). If None,\n use self.planet.map.values instead (default).\n bolo (bool, optional): Determines whether computed flux is bolometric\n (True, default) or wavelength dependent (False).\n tStarBright (ndarray): The stellar brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n allowReflect (bool, optional): Account for the contribution from reflected light.\n allowThermal (bool, optional): Account for the contribution from thermal emission.\n \n Returns:\n ndarray: The observed planetary flux normalized by the stellar flux.\n \n \"\"\"\n \n if t is None:\n # Use Prot instead as map would rotate\n t = self.planet.orbit.t0+np.linspace(0., self.planet.orbit.Prot, 1000)\n x = t/self.planet.orbit.Prot - np.rint(t[0]/self.planet.orbit.Prot)\n \n if type(t)!=np.ndarray or len(t.shape)<3:\n t = np.array([t]).reshape(-1,1,1)\n \n if T is None:\n T = self.planet.map.values[np.newaxis,:]\n \n if allowThermal:\n fp = self.planet.Fp_vis(t, T, bolo, wav)\n else:\n fp = np.zeros_like(t.flatten())\n \n if allowReflect:\n fRefl = self.Fin(t, None, bolo, tStarBright, wav)\n fRefl *= self.planet.albedo/self.planet.absorptivity # Get only the reflected portion\n fRefl = fRefl*self.planet.weight(t, refPos='SOP')*self.planet.map.pixArea*self.planet.rad**2\n fRefl = np.sum(fRefl, axis=(1,2))\n fp += fRefl\n \n return fp/self.star.Fstar(bolo, tStarBright, wav)\n \n def invert_lc(self, fp_fstar, bolo=True, tStarBright=None, wav=4.5e-6):\n \"\"\"Invert the fp/fstar phasecurve into an apparent temperature phasecurve.\n \n Args:\n fp_fstar (ndarray): The observed planetary flux normalized by the stellar flux.\n bolo (bool, optional): Determines whether computed flux is bolometric (True, default)\n or wavelength dependent (False).\n tBright (ndarray): The brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n \n Returns:\n ndarray: The apparent, disk-integrated temperature.\n \n \"\"\"\n \n if bolo:\n return (fp_fstar*self.star.Fstar(bolo)/(np.pi*self.planet.rad**2.)/const.sigma_sb.value)**0.25\n else:\n if tStarBright is None:\n tStarBright = self.star.teff\n a = const.h.value*const.c.value/(const.k_B.value*wav)\n b = np.expm1(a/tStarBright)\n c = 1 + b/(fp_fstar/(self.planet.rad/self.star.rad)**2)\n return a*np.log(c)**-1\n \n def ODE_EQ(self, t, T, dt, TA=None):\n \"\"\"The derivative in temperature with respect to time.\n \n This function neglects for the timescale of 
dissociation/recombination for bell2018 planets.\n \n Args:\n t (float): The time in days.\n T (ndarray): The temperature map with shape (self.planet.map.npix).\n dt (float): The time step in days.\n TA (ndarray, optional): The true anomaly in radians (much faster to compute if provided).\n \n Returns:\n ndarray: The derivative in temperature with respect to time.\n \n \"\"\"\n \n dt *= 24.*3600.\n \n if not callable(self.planet.cp):\n C = self.planet.C\n else:\n if self.planet.cpParams is None:\n C = (self.planet.mlDepth*self.planet.mlDensity*self.planet.cp(T))\n else:\n C = (self.planet.mlDepth*self.planet.mlDensity*self.planet.cp(T, *self.planet.cpParams))\n \n if self.planet.instRedistFrac!=0:\n dT_flux = ((1-self.planet.instRedistFrac)*self.Fin(t, TA)[0]\n +self.planet.instRedistFrac*np.sum(self.Fin(t, TA))/self.planet.map.npix)\n else:\n dT_flux = self.Fin(t, TA)[0]\n if self.planet.internalFlux!=0:\n dT_flux += self.planet.internalFlux \n dT_flux = (dT_flux-self.planet.Fout(T))*dt/C\n \n # advect gas\n if self.planet.wind_dlon != 0:\n fMoved = self.planet.wind_dlon*dt\n T_upWind = T[self.planet.upwindLatIndex,self.planet.upwindLonIndex]\n dT_adv = (T_upWind-T)*fMoved\n else:\n dT_adv = 0\n \n return dT_flux + dT_adv\n \n def _find_dT(self, dT, dE, T0, chi0, plug, cp):\n \"\"\"The error function to minimize to find the energy partitioning between dT and dDiss.\n \n \"\"\"\n \n dDiss = h2.dissFracApprox(T0+dT, *self.planet.cpParams)-chi0\n dT_diss = dDiss*h2.dissE*plug\n return (dE-(dT*cp*plug+dT_diss))**2\n \n def ODE_NEQ(self, t, T, dt, TA=None):\n \"\"\"The derivative in temperature with respect to time.\n \n This function accounts for the timescale of dissociation/recombination for bell2018 planets.\n \n Args:\n t (float): The time in days.\n T (ndarray): The temperature map with shape (self.planet.map.npix).\n dt (float): The timestep in days.\n TA (ndarray, optional): The true anomaly in radians (much faster to compute if provided).\n \n Returns:\n ndarray: The derivative in temperature with respect to time.\n \n \"\"\"\n \n dt *= 24.*3600.\n \n plug = self.planet.mlDepth*self.planet.mlDensity\n cp = h2.lte_cp(T, *self.planet.cpParams)\n \n if self.planet.instRedistFrac!=0:\n dEs = ((1-self.planet.instRedistFrac)*self.Fin(t, TA)[0]\n +self.planet.instRedistFrac*np.sum(self.Fin(t, TA))/self.planet.map.npix)\n else:\n dEs = self.Fin(t, TA)[0]\n if self.planet.internalFlux!=0:\n dEs += self.planet.internalFlux \n dEs = (dEs-self.planet.Fout(T))*dt\n \n C_EQ = self.planet.mlDepth*self.planet.mlDensity*cp\n \n dTs = np.zeros_like(T)\n for i in range(dEs.shape[0]):\n for j in range(dEs.shape[1]):\n dTs[i,j] = spopt.minimize(self._find_dT, x0=dEs[i,j]/C_EQ[i,j],\n args=(dEs[i,j], T[i,j], self.planet.map.dissValues[i,j], plug, cp[i,j]),\n tol=0.001*plug*cp[i,j]).x[0]\n dDiss = h2.dissFracApprox(T+dTs, *self.planet.cpParams)-self.planet.map.dissValues\n \n maxDiss = dt*h2.tau_diss(self.planet.mlDepth,T)\n bad = np.where(dDiss > maxDiss)\n dDiss[bad] = maxDiss[bad]\n dTs[bad] = dDiss[bad]*h2.dissE/cp[bad]-dEs[bad]/cp[bad]/plug\n \n maxRecomb = -dt*h2.tau_recomb(self.planet.mlDepth,T)\n bad = np.where(dDiss < maxRecomb)\n dDiss[bad] = maxRecomb[bad]\n dTs[bad] = dDiss[bad]*h2.dissE/cp[bad]-dEs[bad]/cp[bad]/plug\n \n # advect gas\n if self.planet.wind_dlon != 0:\n fMoved = self.planet.wind_dlon*dt\n T_upWind = T[self.planet.upwindLatIndex,self.planet.upwindLonIndex]\n chi_upWind = self.planet.map.dissValues[self.planet.upwindLatIndex,self.planet.upwindLonIndex]\n dT_adv = 
(T_upWind-T)*fMoved\n dChi_adv = (chi_upWind-self.planet.map.dissValues)*fMoved\n else:\n dT_adv = 0\n dChi_adv = 0\n \n self.planet.map.dissValues += dDiss+dChi_adv\n \n return dTs + dT_adv\n\n def run_model(self, T0=None, t0=0., t1=None, dt=None, verbose=True,\n intermediates=False, progressBar=False, minTemp=0):\n \"\"\"Evolve the planet's temperature map with time.\n \n Args:\n T0 (ndarray): The initial temperature map with shape (self.planet.map.npix).\n If None, use self.planet.map.values instead (default).\n t0 (float, optional): The time corresponding to T0 (default is 0).\n t1 (float, optional): The end point of the run (default is 1 orbital period later).\n dt (float, optional): The time step used to evolve the map (default is 1/100 of the orbital period).\n verbose (bool, optional): Output comments of the progress of the run (default = False)?\n intermediates (bool, optional): Output the map from every time step? Otherwise just returns the last step.\n progressBar (bool, optional): Show a progress bar for the run (nice for long runs).\n minTemp (float, optional): The minimum allowable temperature (can be used to vaguely mimick internal heating).\n \n Returns:\n list: A list of 2 ndarrays containing the time and map of each time step.\n \n \"\"\"\n \n if self.planet.wind_dlon*(dt*24*3600) > 0.5:\n print('Error: Your time step must be sufficiently small so that gas travels less that 0.5 pixels.')\n dtMax = 0.5/self.planet.wind_dlon/24/3600\n dtMax = np.floor(dtMax*1e5)/1e5\n print('Use a time step of '+str(dtMax)+' or less')\n return (None, None)\n \n if T0 is None:\n T0 = self.planet.map.values\n if t1 is None:\n t1 = t0+self.planet.orbit.Porb\n if dt is None:\n dt = self.planet.orbit.Porb/100.\n \n times = (t0 + np.arange(int(np.rint((t1-t0)/dt)))*dt)[:,np.newaxis]\n TAs = self.planet.orbit.true_anomaly(times)[:,:,np.newaxis]\n \n if verbose:\n print('Starting Run')\n maps = T0[np.newaxis,:]\n \n # Soften the blow on the NEQ ODE\n if self.planet.plType == 'bell2018' and self.neq and np.all(self.planet.map.dissValues) == 0.:\n self.planet.map.dissValues = h2.dissFracApprox(T0, *self.planet.cpParams)\n \n if progressBar:\n from tqdm import tnrange\n iterator = tnrange\n else:\n iterator = range\n \n for i in iterator(1, len(times)):\n newMap = (maps[-1]+self.ODE(times[i], maps[-1], dt, TAs[i]))[np.newaxis,:]\n newMap[newMap<minTemp] = minTemp\n if intermediates:\n maps = np.append(maps, newMap, axis=0)\n else:\n maps = newMap\n \n self.planet.map.set_values(maps[-1], times[-1,0])\n if self.planet.plType == 'bell2018' and not self.neq:\n self.planet.map.dissValues = h2.dissFracApprox(self.planet.map.values, *self.planet.cpParams)\n \n if not intermediates:\n times = times[-1]\n \n if verbose:\n print('Done!')\n \n return times, maps\n \n def run_model_tester(self, T0=None, t0=0., t1=None, dt=None, verbose=True,\n intermediates=False, progressBar=False, minTemp=0):\n \"\"\"Evolve the planet's temperature map with time.\n \n Args:\n T0 (ndarray): The initial temperature map with shape (self.planet.map.npix).\n If None, use self.planet.map.values instead (default).\n t0 (float, optional): The time corresponding to T0 (default is 0).\n t1 (float, optional): The end point of the run (default is 1 orbital period later).\n dt (float, optional): The time step used to evolve the map (default is 1/100 of the orbital period).\n verbose (bool, optional): Output comments of the progress of the run (default = False)?\n intermediates (bool, optional): Output the map from every time step? 
Otherwise just returns the last step.\n progressBar (bool, optional): Show a progress bar for the run (nice for long runs).\n minTemp (float, optional): The minimum allowable temperature (can be used to vaguely mimick internal heating).\n \n Returns:\n list: A list of 2 ndarrays containing the time and map of each time step.\n \n \"\"\"\n tInitial = time.time()\n \n if self.planet.wind_dlon*(dt*24*3600) > 0.5:\n print('Error: Your time step must be sufficiently small so that gas travels less that 0.5 pixels.')\n dtMax = 0.5/self.planet.wind_dlon/24/3600\n dtMax = np.floor(dtMax*1e5)/1e5\n print('Use a time step of '+str(dtMax)+' or less')\n return (None, None)\n \n if T0 is None:\n T0 = self.planet.map.values\n if t1 is None:\n t1 = t0+self.planet.orbit.Porb\n if dt is None:\n dt = self.planet.orbit.Porb/100.\n \n times = (t0 + np.arange(int(np.rint((t1-t0)/dt)))*dt)[:,np.newaxis]\n TAs = self.planet.orbit.true_anomaly(times)[:,:,np.newaxis]\n \n if verbose:\n print('Starting Run')\n maps = T0[np.newaxis,:]\n \n # Soften the blow on the NEQ ODE\n if self.planet.plType == 'bell2018' and self.neq and np.all(self.planet.map.dissValues) == 0.:\n self.planet.map.dissValues = h2.dissFracApprox(T0, *self.planet.cpParams)\n \n if progressBar:\n from tqdm import tnrange\n iterator = tnrange\n else:\n iterator = range\n \n for i in iterator(1, len(times)):\n newMap = (maps[-1]+self.ODE(times[i], maps[-1], dt, TAs[i]))[np.newaxis,:]\n newMap[newMap<minTemp] = minTemp\n if intermediates:\n maps = np.append(maps, newMap, axis=0)\n else:\n maps = newMap\n \n self.planet.map.set_values(maps[-1], times[-1,0])\n if self.planet.plType == 'bell2018' and not self.neq:\n self.planet.map.dissValues = h2.dissFracApprox(self.planet.map.values, *self.planet.cpParams)\n \n if not intermediates:\n times = times[-1]\n \n if verbose:\n print('Done!')\n \n tFinal = time.time()\n ttc = tFinal - tInitial\n \n return times, maps, ttc\n \n def plot_lightcurve(self, t=None, T=None, bolo=True, tStarBright=None, wav=4.5e-6, allowReflect=False, allowThermal=True):\n \"\"\"A convenience plotting routine to show the planet's phasecurve.\n\n Args:\n t (ndarray, optional): The time in days with shape (t.size,1). If None, will use 1000\n time steps around orbit.\n T (ndarray, optional): The temperature map in K with shape (1, self.planet.map.npix)\n if the map is constant or (t.size,self.planet.map.npix). If None, use\n self.planet.map.values instead.\n bolo (bool, optional): Determines whether computed flux is bolometric (True, default)\n or wavelength dependent (False).\n tBright (ndarray): The brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n allowReflect (bool, optional): Account for the contribution from reflected light.\n allowThermal (bool, optional): Account for the contribution from thermal emission.\n\n Returns:\n figure: The figure containing the plot.\n\n \"\"\"\n \n if self.planet.orbit.e != 0. and (T is None or t is None):\n print('Warning: Maps and times must be entered for eccentric planets. 
Failing to do so'+\n ' will result in non-sensical lightcurves.')\n return None\n \n if t is None:\n t = self.planet.map.time+np.linspace(0., self.planet.orbit.Porb, 1000)\n else:\n t = t.flatten()\n x = self.get_phase(t)\n \n t = t.reshape(-1,1,1)\n \n if self.planet.orbit.e != 0:\n x *= self.planet.orbit.Porb\n \n if T is None:\n T = self.planet.map.values[np.newaxis,:]\n \n order = np.argsort(x)\n x = x[order]\n t = t[order]\n if T.shape[0] != 1:\n T = T[order]\n \n lc = self.lightcurve(t, T, bolo=bolo, tStarBright=tStarBright, wav=wav, allowReflect=allowReflect,\n allowThermal=allowThermal)*1e6\n \n plt.plot(x, lc)\n if self.planet.orbit.e == 0:\n plt.gca().axvline(self.get_phase_eclipse(), c='k', ls='--', label=r'$\\rm Eclipse$')\n if self.planet.orbit.e != 0:\n plt.gca().axvline(self.planet.orbit.t_ecl, c='k', ls='--', label=r'$\\rm Eclipse$')\n plt.gca().axvline(self.planet.orbit.t_peri,\n c='red', ls='-.', lw=2, label=r'$\\rm Periastron$')\n\n plt.legend(loc=8, bbox_to_anchor=(0.5,1), ncol=2)\n plt.ylabel(r'$F_p/F_*\\rm~(ppm)$')\n if self.planet.orbit.e == 0:\n plt.xlabel(r'$\\rm Orbital~Phase$')\n else:\n plt.xlabel(r'$\\rm Time~from~Transit~(days)$')\n if self.planet.orbit.e != 0:\n plt.xlim(0, self.planet.Porb)\n else:\n plt.xlim(0, 1)\n plt.ylim(0)\n return plt.gcf()\n \n def plot_tempcurve(self, t=None, T=None, bolo=True, tStarBright=None, wav=4.5e-6, allowReflect=False, allowThermal=True):\n \"\"\"A convenience plotting routine to show the planet's phasecurve in units of temperature.\n \n Args:\n t (ndarray, optional): The time in days with shape (t.size,1). If None, will use 1000\n time steps around orbit. Must be provided for eccentric planets.\n T (ndarray, optional): The temperature map in K with shape (1, self.planet.map.npix) if\n the map is constant or (t.size,self.planet.map.npix). If None, use\n self.planet.map.values instead. Must be provided for eccentric planets.\n bolo (bool, optional): Determines whether computed flux is bolometric (True, default)\n or wavelength dependent (False).\n tBright (ndarray): The brightness temperature to use if bolo==False.\n wav (float, optional): The wavelength to use if bolo==False.\n allowReflect (bool, optional): Account for the contribution from reflected light.\n allowThermal (bool, optional): Account for the contribution from thermal emission.\n \n Returns:\n figure: The figure containing the plot.\n \n \"\"\"\n \n if self.planet.orbit.e != 0. and (T is None or t is None):\n print('Warning: Maps and times must be entered for eccentric planets. 
Failing to do so'+\n ' will result in non-sensical lightcurves.')\n return None\n \n if t is None:\n t = self.planet.map.time+np.linspace(0., self.planet.orbit.Porb, 1000)\n else:\n t = t.flatten()\n \n x = self.get_phase(t)\n \n if self.planet.orbit.e != 0:\n x *= self.planet.orbit.Porb\n \n if T is None:\n T = self.planet.map.values[np.newaxis,:]\n \n order = np.argsort(x)\n x = x[order]\n t = t[order]\n if T.shape[0] != 1:\n T = T[order]\n \n lc = self.lightcurve(t, T, bolo=bolo, tStarBright=tStarBright, wav=wav,\n allowReflect=allowReflect, allowThermal=allowThermal)\n tc = self.invert_lc(lc, bolo=bolo, tStarBright=tStarBright, wav=wav)\n \n plt.plot(x, tc)\n \n if self.planet.orbit.e == 0:\n plt.gca().axvline(self.get_phase_eclipse(), c='k', ls='--', label=r'$\\rm Eclipse$')\n if self.planet.orbit.e != 0:\n plt.gca().axvline(self.planet.orbit.t_ecl, c='k', ls='--', label=r'$\\rm Eclipse$')\n plt.gca().axvline(self.planet.orbit.t_peri,\n c='red', ls='-.', lw=2, label=r'$\\rm Periastron$')\n\n plt.legend(loc=8, bbox_to_anchor=(0.5,1), ncol=2)\n if bolo:\n plt.ylabel(r'$T_{\\rm eff, hemi, apparent}\\rm~(K)$')\n else:\n plt.ylabel(r'$T_{\\rm b, hemi, apparent}\\rm~(K)$')\n if self.planet.orbit.e == 0:\n plt.xlabel(r'$\\rm Orbital~Phase$')\n else:\n plt.xlabel(r'$\\rm Time~from~Transit~(days)$')\n if self.planet.orbit.e != 0:\n plt.xlim(0, self.planet.Porb)\n else:\n plt.xlim(0, 1)\n plt.ylim(0)\n return plt.gcf()\n"
] | [
[
"numpy.ones_like",
"matplotlib.pyplot.xlim",
"numpy.rint",
"numpy.where",
"numpy.expm1",
"matplotlib.pyplot.gcf",
"numpy.zeros_like",
"numpy.log",
"numpy.sqrt",
"numpy.append",
"matplotlib.pyplot.gca",
"scipy.optimize.minimize",
"numpy.array",
"numpy.argsort",
"numpy.floor",
"numpy.sum",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"numpy.all",
"numpy.linspace"
]
] |
gohsyi/secure_connectivity | [
"cf2575f29aa82b4d77695079be578973a660016f"
] | [
"models/a2c/runner.py"
] | [
"import numpy as np\n\nfrom models.a2c.utils import discount_with_dones\n\n\nclass Runner(object):\n \"\"\"\n We use this class to generate batches of experiences\n\n __init__:\n - Initialize the runner\n\n run():\n - Make a mini batch of experiences\n \"\"\"\n\n def __init__(self, env, d_model, a_model, bl_d_model, bl_a_model, nsteps=5, gamma=0.99):\n self.d_model = d_model\n self.a_model = a_model\n self.bl_d_model = bl_d_model\n self.bl_a_model = bl_a_model\n\n self.env = env\n self.obs = env.reset()\n self.nsteps = nsteps\n self.gamma = gamma\n\n def run(self):\n \"\"\"\n Make a mini batch of experiences\n\n Returns\n -------\n mb_obs:\n (batch_size x ob_size), observations of both defender and attacker\n\n (mb_d_rewards, mb_a_rewards):\n (batch_size x 1, batch_size x 1), rewards of attacker\n\n (mb_d_actions, mb_a_actions):\n (batch_size x 1, batch_size x 1), actions of attacker\n\n (mb_d_values, mb_a_values):\n (batch_size x 1, batch_size x 1), estimated value of attacker\n\n epinfos:\n other infos (useless for now)\n \"\"\"\n\n # We initialize the lists that will contain the mb of experiences\n mb_obs, mb_dones = [],[]\n mb_d_rewards, mb_a_rewards, mb_bl_d_rewards, mb_bl_a_rewards = [],[],[],[]\n mb_d_actions, mb_a_actions, mb_bl_d_actions, mb_bl_a_actions = [],[],[],[]\n mb_d_values, mb_a_values, mb_bl_d_values, mb_bl_a_values = [],[],[],[]\n\n for n in range(self.nsteps):\n # Given observations, take action and value (V(s))\n # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init\n d_actions, d_values = self.d_model.step(self.obs)\n a_actions, a_values = self.a_model.step(self.obs)\n bl_d_actions, _ = self.bl_d_model.step(self.obs)\n bl_a_actions, _ = self.bl_a_model.step(self.obs)\n\n d_actions = np.squeeze(d_actions)\n a_actions = np.squeeze(a_actions)\n d_values = np.squeeze(d_values)\n a_values = np.squeeze(a_values)\n bl_d_actions = np.squeeze(bl_d_actions)\n bl_a_actions = np.squeeze(bl_a_actions)\n\n # Append the experiences\n mb_obs.append(np.copy(self.obs))\n mb_d_actions.append(d_actions)\n mb_a_actions.append(a_actions)\n mb_d_values.append(d_values)\n mb_a_values.append(a_values)\n mb_bl_d_actions.append(d_actions)\n mb_bl_a_actions.append(a_actions)\n mb_bl_d_values.append(d_values)\n mb_bl_a_values.append(a_values)\n\n # Take actions in env and look the results\n bl_d_rewards, a_rewards = self.env.eval((bl_d_actions, a_actions))\n d_rewards, bl_a_rewards = self.env.eval((d_actions, bl_a_actions))\n d_rewards, a_rewards = self.env.eval((d_actions, a_actions))\n\n obs, rewards, dones, infos = self.env.step((d_actions, a_actions))\n self.obs = obs\n mb_d_rewards.append(d_rewards)\n mb_a_rewards.append(a_rewards)\n mb_bl_d_rewards.append(bl_d_rewards)\n mb_bl_a_rewards.append(bl_a_rewards)\n\n # TODO add bootstrap\n # if self.gamma > 0.0:\n # # Discount/bootstrap off value fn for defender\n # last_values = self.d_model.value(self.obs).tolist()\n # for n, (rewards, dones, value) in enumerate(zip(mb_d_rewards, mb_dones, last_values)):\n # rewards = rewards.tolist()\n # dones = dones.tolist()\n # if dones[-1] == 0:\n # rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]\n # else:\n # rewards = discount_with_dones(rewards, dones, self.gamma)\n #\n # mb_d_rewards[n] = rewards\n #\n # # Discount/bootstrap off value fn for attacker\n # last_values = self.a_model.value(self.obs).tolist()\n # for n, (rewards, dones, value) in enumerate(zip(mb_a_rewards, mb_dones, last_values)):\n # rewards = rewards.tolist()\n # dones = 
dones.tolist()\n # if dones[-1] == 0:\n # rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]\n # else:\n # rewards = discount_with_dones(rewards, dones, self.gamma)\n #\n # mb_a_rewards[n] = rewards\n\n return np.array(mb_obs), \\\n (np.array(mb_d_rewards), np.array(mb_a_rewards)), \\\n (np.array(mb_d_actions), np.array(mb_a_actions)), \\\n (np.array(mb_d_values), np.array(mb_a_values)), \\\n (np.array(mb_bl_d_rewards), np.array(mb_bl_a_rewards))\n"
] | [
[
"numpy.array",
"numpy.copy",
"numpy.squeeze"
]
] |
sethmerkel/qiskit-ignis | [
"92ba61a329cf9e1a871d8bcc7eace2d2b5951253"
] | [
"qiskit/ignis/verification/accreditation/qotp.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=no-member,invalid-name\n\n\n\"\"\"\nQuantum one-time pad\n\"\"\"\n\n\nimport numpy as np\nfrom qiskit import QuantumCircuit\nfrom qiskit.converters.circuit_to_dag import circuit_to_dag\nfrom qiskit.converters.dag_to_circuit import dag_to_circuit\nfrom qiskit.compiler import transpile\nfrom qiskit.exceptions import QiskitError\n\n\ndef layer_parser(circ, two_qubit_gate='cx', coupling_map=None):\n \"\"\"\n Tranforms general circuits into a nice form for a qotp.\n\n Args:\n circ (QuantumCircuit): A generic quantum circuit\n two_qubit_gate (str): a flag as to which 2 qubit\n gate to compile with, can be cx or cz\n coupling_map (list): some particular device topology as list\n of list (e.g. [[0,1],[1,2],[2,0]])\n Returns:\n dict: A dictionary of the parsed layers with the following keys:\n ``singlequbit_layers`` (lsit): a list of circuits describing\n the single qubit gates\n ``cz_layers`` (list): a list of circuits describing the cz layers\n ``meas_layer`` (QuantumCircuit): a circuit describing the final measurement\n\n Raises:\n QiskitError: If a circuit element is not implemented in qotp\n \"\"\"\n\n # transpile to single qubits and cx\n # TODO: replace cx with cz when that is available\n circ_internal = transpile(circ,\n optimization_level=2,\n basis_gates=['u1', 'u2', 'u3', 'cx'],\n coupling_map=coupling_map)\n # quantum and classial registers\n qregs = circ_internal.qregs[0]\n cregs = circ_internal.cregs[0]\n # conatiners for the eventual output passed to the accred code\n singlequbitlayers = [QuantumCircuit(qregs, cregs),\n QuantumCircuit(qregs, cregs)]\n twoqubitlayers = [QuantumCircuit(qregs, cregs)]\n measlayer = QuantumCircuit(qregs, cregs)\n # some flags for simplicity\n current2qs = []\n # loop through circuit (best to use the dag object)\n dag_internal = circuit_to_dag(circ_internal)\n for dag_layer in dag_internal.layers():\n circuit_layer = dag_to_circuit(dag_layer['graph'])\n for circelem, qsub, csub in circuit_layer:\n n = circelem.name\n if n == \"barrier\":\n # if a barrier separates any two qubit gates\n # start a new layer\n if current2qs != []:\n singlequbitlayers.append(QuantumCircuit(qregs, cregs))\n twoqubitlayers.append(QuantumCircuit(qregs, cregs))\n current2qs = []\n singlequbitlayers[-2].append(circelem, qsub, csub)\n elif n in ('u1', 'u2', 'u3'):\n # single qubit gate\n q = qsub[0]\n if q in current2qs:\n singlequbitlayers[-1].append(circelem, qsub, csub)\n else:\n singlequbitlayers[-2].append(circelem, qsub, csub)\n elif n == \"cx\":\n # cx indices\n q_0 = qsub[0]\n q_1 = qsub[1]\n # check if new cnot satisfies overlap criteria\n if q_0 in current2qs or q_1 in current2qs:\n singlequbitlayers.append(QuantumCircuit(qregs, cregs))\n twoqubitlayers.append(QuantumCircuit(qregs, cregs))\n current2qs = []\n if two_qubit_gate == 'cx':\n # append cx\n twoqubitlayers[-1].cx(q_0, q_1)\n elif two_qubit_gate == 'cz':\n # append and correct to cz with h gates\n twoqubitlayers[-1].cz(q_0, q_1)\n singlequbitlayers[-1].h(qsub[1])\n singlequbitlayers[-2].h(qsub[1])\n else:\n 
raise QiskitError(\"Two qubit gate {0}\".format(two_qubit_gate)\n + \" is not implemented in qotp\")\n # add to current\n current2qs.append(q_0)\n current2qs.append(q_1)\n elif n == \"measure\":\n measlayer.append(circelem, qsub, csub)\n else:\n raise QiskitError(\"Circuit element {0}\".format(n)\n + \" is not implemented in qotp\")\n if current2qs == []:\n del singlequbitlayers[-1]\n del twoqubitlayers[-1]\n for ind, circlayer in enumerate(singlequbitlayers):\n singlequbitlayers[ind] = transpile(circlayer,\n basis_gates=['u1', 'u2', 'u3'])\n parsedlayers = {'singlequbitlayers': singlequbitlayers,\n 'twoqubitlayers': twoqubitlayers,\n 'measlayer': measlayer,\n 'twoqubitgate': two_qubit_gate,\n 'qregs': qregs,\n 'cregs': cregs}\n return parsedlayers\n\n\ndef QOTP_fromlayers(layers, rng):\n \"\"\"\n An intermediate step of a qotp in which we've converted the circuit\n to layers and only return a single pad or compilation\n\n Args:\n layers (dict): parsed layers from the layer parser\n rng (RNG): a random number generator\n Returns:\n tuple: a tuple of type (``qotp_circ``, ``qotp_postp``) where:\n ``qotp_circ`` (QuantumCircuit): output onetime pad circ\n ``qotp_postp`` (list): correction as liist of bits\n\n Raises:\n QiskitError: If a circuit element is not implemented in qotp\n \"\"\"\n\n # make some circuits\n qregs = layers['qregs']\n cregs = layers['cregs']\n twoqubitgate = layers['twoqubitgate']\n qotp_circ = QuantumCircuit(qregs, cregs)\n temp_circ = QuantumCircuit(qregs, cregs)\n\n # initial z gates after prep\n paulizs = rng.randint(2, size=len(qregs))\n for qind, q in enumerate(qregs):\n if paulizs[qind]:\n temp_circ.z(q)\n # step through layers\n for lnum, gates2q in enumerate(layers['twoqubitlayers']):\n # add single qubit gates to temp circuit\n temp_circ = temp_circ+layers['singlequbitlayers'][lnum]\n # generate and add single qubit paulis\n paulizs = rng.randint(2, size=len(qregs))\n paulixs = rng.randint(2, size=len(qregs))\n for qind, q in enumerate(qregs):\n if paulizs[qind]:\n temp_circ.z(q)\n if paulixs[qind]:\n temp_circ.x(q)\n # add to circuit and reset temp\n temp_circ = transpile(temp_circ,\n basis_gates=['u1', 'u2', 'u3'])\n qotp_circ = qotp_circ+temp_circ\n temp_circ = QuantumCircuit(qregs, cregs)\n # add two qubit layers and get indices for 2qgates\n qotp_circ.barrier()\n qotp_circ = qotp_circ+gates2q\n qotp_circ.barrier()\n twoqindices = []\n for _, qsub, _ in gates2q:\n twoqindices.append([qsub[0].index, qsub[1].index])\n # update Paulis\n for inds in twoqindices:\n if twoqubitgate == 'cx':\n # iz -> zz and xi -> xx\n paulizs[inds[0]] = (paulizs[inds[0]]+paulizs[inds[1]]) % 2\n paulixs[inds[1]] = (paulixs[inds[1]]+paulixs[inds[0]]) % 2\n elif twoqubitgate == 'cz':\n # ix -> zx and xi -> xz\n paulizs[inds[0]] = (paulizs[inds[0]]+paulixs[inds[1]]) % 2\n paulizs[inds[1]] = (paulizs[inds[1]]+paulixs[inds[0]]) % 2\n else:\n raise QiskitError(\"Two qubit gate {0}\".format(twoqubitgate)\n + \"is not implemented in qotp\")\n for qind, q in enumerate(qregs):\n if paulixs[qind]:\n temp_circ.x(q)\n if paulizs[qind]:\n temp_circ.z(q)\n # add final single qubit layer\n temp_circ = temp_circ+layers['singlequbitlayers'][-1]\n # add final Paulis to create the one time pad\n paulizs = rng.randint(2, size=len(qregs))\n paulixs = rng.randint(2, size=len(qregs))\n for qind, q in enumerate(qregs):\n if paulizs[qind]:\n temp_circ.z(q)\n if paulixs[qind]:\n temp_circ.x(q)\n # add to circuit\n temp_circ = transpile(temp_circ,\n basis_gates=['u1', 'u2', 'u3'])\n qotp_circ = 
qotp_circ+temp_circ\n # post operations\n qotp_postp = np.flip(paulixs)\n # measurements\n qotp_circ = qotp_circ+layers['measlayer']\n return qotp_circ, qotp_postp\n\n\ndef QOTP(circ, num, two_qubit_gate='cx', coupling_map=None, seed=None):\n \"\"\"\n Performs a QOTP (or random compilation) on a generic circuit.\n\n This is essentially the same protocol as used in\n randomized compiling, but follows the methods in\n Samuele Ferracin, Theodoros Kapourniotis and Animesh Datta\n New Journal of Physics, Volume 21, November 2019\n https://iopscience.iop.org/article/10.1088/1367-2630/ab4fd6\n\n Args:\n circ (QuantumCircuit): A generic quantum circuit\n num (int): the number of one-time pads to return\n two_qubit_gate (string): a flag as to which 2 qubit\n gate to compile with, can be cx or cz\n coupling_map (list): a particular device topology as a\n list of list (e.g. [[0,1],[1,2],[2,0]])\n seed (int): seed to the random number generator\n Returns:\n tuple: a tuple of type (``qotp_circ``, ``qotp_postp``) where:\n qotp_circs (list): a list of circuits with qotp applied\n qotp_postps (list): a list of arrays specifying the one time pads\n \"\"\"\n rng = np.random.RandomState(seed)\n # break into layers\n layers = layer_parser(circ,\n two_qubit_gate=two_qubit_gate,\n coupling_map=coupling_map)\n # output lists\n qotp_circs = []\n qotp_postps = []\n # generate circuits and postops\n for _ in range(num):\n circ, postp = QOTP_fromlayers(layers, rng)\n qotp_circs.append(circ)\n qotp_postps.append(postp)\n return qotp_circs, qotp_postps\n\n\ndef QOTPCorrectString(qotp_string, qotp_postp):\n \"\"\"\n Corrects a measurement string, shifting the qotp\n\n Args:\n qotp_string (str): a measurement output string\n qotp_postp (list): a binary list denoting the one time pad\n Returns:\n dict: the corrected counts dict\n \"\"\"\n corrected_string = [1 if k == \"1\" else 0 for k in qotp_string]\n corrected_string = [(k+s) % 2 for k, s in zip(corrected_string, qotp_postp)]\n corrected_string = ''.join([str(k) for k in corrected_string])\n return corrected_string\n\n\ndef QOTPCorrectCounts(qotp_counts, qotp_postp):\n \"\"\"\n Corrects a dictionary of results, shifting the qotp\n\n Args:\n qotp_counts (dict): a dict of exp counts\n qotp_postp (list): a binary list denoting the one time pad\n Returns:\n dict: the corrected counts dict\n \"\"\"\n\n counts_out = {}\n for key, val in qotp_counts.items():\n keyshift = QOTPCorrectString(key, qotp_postp)\n counts_out[keyshift] = val\n return counts_out\n"
] | [
[
"numpy.flip",
"numpy.random.RandomState"
]
] |
andsor/pysimoa | [
"8734c062fa4a21b94d0e27ef460f3d8f8c3684da"
] | [
"simoa/test/test_mser.py"
] | [
"# -*- coding: utf-8 -*-\n\n'''\n\n Copyright 2015 The pysimoa Developers\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n'''\n\n\nimport numpy as np\nimport simoa.mser\n\n\ndef test_mser5():\n data = np.random.rand(1000)\n ret = simoa.compute_mser5_interval(data)\n env = ret.env\n assert env['Z_j'].size == 200\n assert env['Z_j'][1] == data[5:10].mean()\n # assert env[simoa.mser.MSER5_NEW_BATCH_MEANS_KEY].size == 20\n"
] | [
[
"numpy.random.rand"
]
] |
prakamya-mishra/Road-Network-Mapping-from-Aerial-Images | [
"743be76a241fc41bcf61bd7519f0796370e39d34"
] | [
"roadforesttest.py"
] | [
"import pandas as pd\nimport houghtest\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nimport cv2\nimport numpy as np\nimport pickle\n\ndef main(img_path_or):\n\ttrained_model=pickle.load(open(\"trained_model_25509.sav\",'rb'))\n\t\"\"\"\n\tHEADERS = [\"cn\",\"b\",\"g\",\"r\",\"bl\",\"gl\",\"rl\",\"br\",\"gr\",\"rr\",\"bu\",\"gu\",\"ru\",\"bul\",\"gul\",\"rul\",\n\t\t \"bur\",\"gur\",\"rur\",\"bdl\",\"gdl\",\"rdl\",\"bdr\",\"gdr\",\"rdr\",\"bd\",\"gd\",\"rd\",\"road\"]\n\tprint sorted(zip(trained_model.feature_importances_,HEADERS),reverse=True)\n\t\"\"\"\n\tcandidates = [103,110,121,122,124,125,127,128,129,133,137,138,145,147,153,157,159,161,\n\t\t\t 164,165,167,170]\n\tfor z in range(0,1):\n\t\tmedian_im = cv2.imread(img_path_or)\n\t\timg_copy = median_im.copy()\n\n\t\t#median_im = cv2.medianBlur(median_im, 3)\n\t\t#median_im = image\n\t\tfor y in range(1, 318, 1): #4611 4778\n\t\t\tfor c in range(1, 478, 1):\n\t\t\t\tb = median_im.item(y, c, 0)\n\t\t\t\tg = median_im.item(y, c, 1)\n\t\t\t\tr = median_im.item(y, c, 2)\n\t\t\t\tbl = median_im.item(y, c - 1, 0)\n\t\t\t\tgl = median_im.item(y, c - 1, 1)\n\t\t\t\trl = median_im.item(y, c - 1, 2)\n\t\t\t\tbr = median_im.item(y, c + 1, 0)\n\t\t\t\tgr = median_im.item(y, c + 1, 1)\n\t\t\t\trr = median_im.item(y, c + 1, 2)\n\t\t\t\tbu = median_im.item(y - 1, c, 0)\n\t\t\t\tgu = median_im.item(y - 1, c, 1)\n\t\t\t\tru = median_im.item(y - 1, c, 2)\n\t\t\t\tbul = median_im.item(y - 1, c - 1, 0)\n\t\t\t\tgul = median_im.item(y - 1, c - 1, 1)\n\t\t\t\trul = median_im.item(y - 1, c - 1, 2)\n\t\t\t\tbur = median_im.item(y - 1, c + 1, 0)\n\t\t\t\tgur = median_im.item(y - 1, c + 1, 1)\n\t\t\t\trur = median_im.item(y - 1, c + 1, 2)\n\t\t\t\tbdl = median_im.item(y + 1, c - 1, 0)\n\t\t\t\tgdl = median_im.item(y + 1, c - 1, 1)\n\t\t\t\trdl = median_im.item(y + 1, c - 1, 2)\n\t\t\t\tbdr = median_im.item(y + 1, c + 1, 0)\n\t\t\t\tgdr = median_im.item(y + 1, c + 1, 1)\n\t\t\t\trdr = median_im.item(y + 1, c + 1, 2)\n\t\t\t\tbd = median_im.item(y + 1, c, 0)\n\t\t\t\tgd = median_im.item(y + 1, c, 1)\n\t\t\t\trd = median_im.item(y + 1, c, 2)\n\t\t\t\tnew_prediction = trained_model.predict(np.array([[b, g, r, bl, gl, rl, br, gr, rr, bu, gu, ru, bul, gul, rul, bur, gur, rur, bdl, gdl, rdl, bdr, gdr, rdr, bd, gd, rd]]))\n\t\t\t\tif new_prediction > 0.5:\n\t\t\t\t\tpred = 1\n\t\t\t\telse:\n\t\t\t\t\tpred = 0\n\t\t\t\tif pred == 1:\n\t\t\t\t\timg_copy[y, c] = (255, 255, 0)\n\t\tcv2.imwrite(\"images/out1.png\",img_copy)\n\t\tlength = houghtest.main(\"images/out1.png\",img_path_or)\n\n\t\treturn length\n\t#cv2.imshow(\"pred\", img_copy)\n\t#cv2.imwrite(\"pred8superr.jpg\", img_copy)\n\t#cv2.waitKey(0)\n\nif __name__==\"__main__\":\n\tmain()\n"
] | [
[
"numpy.array"
]
] |
ababino/babino2020masks | [
"06964ecc268fe573140a67bbf13e78495858de84"
] | [
"babino2020masks/counterfactual.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: 02_counterfactual.ipynb (unless otherwise specified).\n\n__all__ = []\n\n# Cell\nfrom .lasso import *\nimport statsmodels.api as sm\nfrom fastcore.all import *\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\nimport numpy as np\n\n# Cell\n@patch\ndef counterfactual(self:LassoICSelector):\n exog_full = self.transform_to_ols(self.X)\n ind = exog_full[:, -2]<2\n y_sub = self.y[ind]\n exog_sub = exog_full[ind,:]\n exog_sub = np.hstack([exog_sub[:, :-2],exog_sub[:,-1:]])\n ols = sm.OLS(y_sub, exog_sub)\n res = ols.fit()\n exog = np.hstack([exog_full[:, :-2],exog_full[:,-1:]])\n yhat = res.predict(exog)\n yhat_orig = self.predict(exog_full)\n\n odds_cf = np.exp(yhat)\n (yhat_std, yhat_l, yhat_u) = wls_prediction_std(res, exog)\n oddshat_std = odds_cf*yhat_std\n return odds_cf, odds_cf - 2*oddshat_std, odds_cf + 2*oddshat_std"
] | [
[
"numpy.hstack",
"numpy.exp"
]
] |
AnTao97/UnsupervisedPointCloudSegmentation | [
"9bcf0bdf3b1ae62421d9202eb7c0b014d6a69c02"
] | [
"classification.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: An Tao\n@Contact: [email protected]\n@File: reconstruction.py\n@Time: 2020/1/2 10:26 AM\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport shutil\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nimport sklearn.metrics as metrics\n\nfrom tensorboardX import SummaryWriter\n\nfrom model import ClassificationNet\nfrom dataset import Dataset\nfrom utils import Logger\n\n\nclass Classification(object):\n def __init__(self, args):\n self.dataset_name = args.dataset\n if args.epochs != None:\n self.epochs = args.epochs\n else:\n self.epochs = 250\n self.batch_size = args.batch_size\n self.snapshot_interval = args.snapshot_interval\n self.no_cuda = args.no_cuda\n self.model_path = args.model_path\n self.no_scheduler = args.no_scheduler\n\n # create exp directory\n file = [f for f in args.model_path.split('/')]\n if args.exp_name != None:\n self.experiment_id = \"Classify_\" + args.exp_name\n elif file[-2] == 'models':\n self.experiment_id = file[-3]\n else:\n self.experiment_id = \"Classify\" + time.strftime('%m%d%H%M%S')\n snapshot_root = 'snapshot/%s' % self.experiment_id\n tensorboard_root = 'tensorboard/%s' % self.experiment_id\n self.save_dir = os.path.join(snapshot_root, 'models/')\n self.tboard_dir = tensorboard_root\n\n # check arguments\n if self.model_path == '':\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n else:\n choose = input(\"Remove \" + self.save_dir + \" ? (y/n)\")\n if choose == \"y\":\n shutil.rmtree(self.save_dir)\n os.makedirs(self.save_dir)\n else:\n sys.exit(0)\n if not os.path.exists(self.tboard_dir):\n os.makedirs(self.tboard_dir)\n else:\n shutil.rmtree(self.tboard_dir)\n os.makedirs(self.tboard_dir)\n sys.stdout = Logger(os.path.join(snapshot_root, 'log.txt'))\n self.writer = SummaryWriter(log_dir=self.tboard_dir)\n\n # print args\n print(str(args))\n\n # get gpu id\n gids = ''.join(args.gpu.split())\n self.gpu_ids = [int(gid) for gid in gids.split(',')]\n self.first_gpu = self.gpu_ids[0]\n\n # generate dataset\n self.train_dataset = Dataset(\n root=args.dataset_root,\n dataset_name=args.dataset,\n split='all',\n num_points=args.num_points,\n random_translate=True,\n random_rotate=args.use_rotate,\n random_jitter=args.use_jitter\n )\n self.train_loader = torch.utils.data.DataLoader(\n self.train_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers\n )\n print(\"Training set size:\", self.train_loader.dataset.__len__())\n\n # initialize model\n self.model = ClassificationNet(args)\n if self.model_path != '':\n self._load_pretrain(args.model_path)\n\n # load model to gpu\n if not self.no_cuda:\n if len(self.gpu_ids) != 1: # multiple gpus\n self.model = torch.nn.DataParallel(self.model.cuda(self.first_gpu), self.gpu_ids)\n else:\n self.model = self.model.cuda(self.gpu_ids[0])\n \n # initialize optimizer\n self.parameter = self.model.parameters()\n if self.no_scheduler == False:\n self.optimizer = optim.SGD(self.parameter, lr=0.1, weight_decay=1e-4)\n self.scheduler = CosineAnnealingLR(self.optimizer, self.epochs, eta_min=1e-3)\n else:\n self.optimizer = optim.SGD(self.parameter, lr=0.01, weight_decay=1e-4)\n\n\n def run(self):\n self.train_hist = {\n 'loss': [],\n 'per_epoch_time': [],\n 'total_time': []\n }\n best_loss = 1000000000\n print('Training start!!')\n start_time = time.time()\n self.model.train()\n if self.model_path != '':\n start_epoch = self.model_path[-7:-4]\n if 
start_epoch[0] == '_':\n start_epoch = start_epoch[1:]\n start_epoch = int(start_epoch)\n else:\n start_epoch = 0\n for epoch in range(start_epoch, self.epochs):\n loss = self.train_epoch(epoch)\n \n # save snapeshot\n if (epoch + 1) % self.snapshot_interval == 0:\n self._snapshot(epoch + 1)\n if loss < best_loss:\n best_loss = loss\n self._snapshot('best')\n \n # save tensorboard\n if self.writer:\n self.writer.add_scalar('Train Loss', self.train_hist['loss'][-1], epoch)\n self.writer.add_scalar('Learning Rate', self._get_lr(), epoch)\n \n # finish all epoch\n self._snapshot(epoch + 1)\n if loss < best_loss:\n best_loss = loss\n self._snapshot('best')\n self.train_hist['total_time'].append(time.time() - start_time)\n print(\"Avg one epoch time: %.2f, total %d epochs time: %.2f\" % (np.mean(self.train_hist['per_epoch_time']),\n self.epochs, self.train_hist['total_time'][0]))\n print(\"Training finish!... save training results\")\n\n\n def train_epoch(self, epoch):\n epoch_start_time = time.time()\n loss_buf = []\n train_pred = []\n train_true = []\n num_batch = int(len(self.train_loader.dataset) / self.batch_size)\n for iter, (pts, label) in enumerate(self.train_loader):\n if pts.size(0) == 1:\n continue\n if not self.no_cuda:\n pts = pts.cuda(self.first_gpu)\n label = label.cuda(self.first_gpu)\n\n # forward\n self.optimizer.zero_grad()\n output, _ = self.model(pts)\n\n # loss\n if len(self.gpu_ids) != 1: # multiple gpus\n loss = self.model.module.get_loss(output, label)\n else:\n loss = self.model.get_loss(output, label)\n\n # backward\n loss.backward()\n self.optimizer.step()\n loss_buf.append(loss.detach().cpu().numpy())\n\n preds = output.max(dim=1)[1]\n train_true.append(label.view(-1).cpu().numpy())\n train_pred.append(preds.detach().cpu().numpy())\n\n # finish one epoch\n if self.no_scheduler == False:\n self.scheduler.step()\n epoch_time = time.time() - epoch_start_time\n self.train_hist['per_epoch_time'].append(epoch_time)\n self.train_hist['loss'].append(np.mean(loss_buf))\n train_true = np.concatenate(train_true)\n train_pred = np.concatenate(train_pred)\n print(\"Epoch %d: Loss %.6f, train acc %.6f, train avg acc %.6f, time %.4fs\" % (epoch+1,\n np.mean(loss_buf),\n metrics.accuracy_score(\n train_true, train_pred),\n metrics.balanced_accuracy_score(\n train_true, train_pred),\n epoch_time))\n return np.mean(loss_buf)\n\n\n def _snapshot(self, epoch):\n state_dict = self.model.state_dict()\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for key, val in state_dict.items():\n if key[:6] == 'module':\n name = key[7:] # remove 'module.'\n else:\n name = key\n new_state_dict[name] = val\n save_dir = os.path.join(self.save_dir, self.dataset_name)\n torch.save(new_state_dict, save_dir + \"_\" + str(epoch) + '.pkl')\n print(f\"Save model to {save_dir}_{str(epoch)}.pkl\")\n\n\n def _load_pretrain(self, pretrain):\n state_dict = torch.load(pretrain, map_location='cpu')\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for key, val in state_dict.items():\n if key[:6] == 'module':\n name = key[7:] # remove 'module.'\n else:\n name = key\n new_state_dict[name] = val\n self.model.load_state_dict(new_state_dict)\n print(f\"Load model from {pretrain}\")\n\n\n def _get_lr(self, group=0):\n return self.optimizer.param_groups[group]['lr']\n"
] | [
[
"numpy.concatenate",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.optim.SGD",
"sklearn.metrics.balanced_accuracy_score",
"numpy.mean",
"sklearn.metrics.accuracy_score",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
LinLidi/Machine-Learning-Algorithm-Practice | [
"5af09d0da4f714e8f4c288e5a7a42fe6cb229677"
] | [
"K_Means/K_Means_v1.py"
] | [
"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef loadDataSet(fileName):\n '''\n :param fileName: dataset file\n :return:data map(list)\n '''\n dataMat = []\n with open(fileName,'r')as f:\n all_data = f.readlines()\n for line in all_data:\n curLine = line.strip().split('\\t')\n fltLine = list(map(float, curLine))\n dataMat.append(fltLine)\n return dataMat\n\ndef distEclud(vecA, vecB):\n '''\n :param vecA: data A Vector\n :param vecB: data B Vector\n :return: Euclidean distance\n '''\n return np.sqrt(np.sum(np.power(vecA - vecB, 2)))\n\n\n\"\"\"\n函数说明:随机初始化k个质心(质心满足数据边界之内)\n\nParameters:\n dataSet - 输入的数据集\n k - 选取k个质心\n \nReturns:\n centroids - 返回初始化得到的k个质心向量\n\nModify:\n 2018-08-02\n\"\"\"\ndef randCent(dataSet, k):\n # 得到数据样本的维度\n n = np.shape(dataSet)[1]\n # 初始化为一个(k,n)的全零矩阵\n centroids = np.mat(np.zeros((k, n)))\n # 遍历数据集的每一个维度\n for j in range(n):\n # 得到该列数据的最小值,最大值\n minJ = np.min(dataSet[:, j])\n maxJ = np.max(dataSet[:, j])\n # 得到该列数据的范围(最大值-最小值)\n rangeJ = float(maxJ - minJ)\n # k个质心向量的第j维数据值随机为位于(最小值,最大值)内的某一值\n # Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1).\n centroids[:, j] = minJ + rangeJ * np.random.rand(k, 1)\n # 返回初始化得到的k个质心向量\n return centroids\n\n\n\"\"\"\n函数说明:k-means聚类算法\n\nParameters:\n dataSet - 用于聚类的数据集\n k - 选取k个质心\n distMeas - 距离计算方法,默认欧氏距离distEclud()\n createCent - 获取k个质心的方法,默认随机获取randCent()\n \nReturns:\n centroids - k个聚类的聚类结果\n clusterAssment - 聚类误差\n\nModify:\n 2018-08-02\n\"\"\"\ndef kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):\n '''\n :param dataSet: data file [[]]\n :param k: the nums of k\n :param distMeas: the function computed distanse between two points\n :param createCent:\n :return: result by k-means\n '''\n # 获取数据集样本数,初始化一个(m,2)全零矩阵\n m = np.shape(dataSet)[0]\n clusterAssment = np.mat(np.zeros((m, 2)))\n # 创建初始的k个质心向量\n centroids = createCent(dataSet, k)\n\n clusterChanged = True\n while clusterChanged:\n clusterChanged = False\n for i in range(m):\n # 初始化最小距离为正无穷,最小距离对应的索引为-1\n minDist = float('inf')\n minIndex = -1\n # 循环k个类的质心\n for j in range(k):\n distJI = distMeas(centroids[j, :], dataSet[i, :])\n # 如果距离小于当前最小距离\n if distJI < minDist:\n # 当前距离为最小距离,最小距离对应索引应为j(第j个类)\n minDist = distJI\n minIndex = j\n # 当前聚类结果中第i个样本的聚类结果发生变化:布尔值置为True,继续聚类算法\n if clusterAssment[i, 0] != minIndex: \n clusterChanged = True\n # 更新当前变化样本的聚类结果和平方误差\n clusterAssment[i, :] = minIndex, minDist**2\n # 打印k-means聚类的质心\n print(centroids)\n # 遍历每一个质心\n for cent in range(k):\n # 将数据集中所有属于当前质心类的样本通过条件过滤筛选出来\n ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]\n # 计算这些数据的均值(axis=0:求列均值),作为该类质心向量\n centroids[cent, :] = np.mean(ptsInClust, axis=0)\n # 返回k个聚类,聚类结果及误差\n return centroids, clusterAssment\n \n\n\"\"\"\n函数说明:绘制数据集\n\nParameters:\n fileName - 文件名\n \nReturns:\n None\n\nModify:\n 2018-08-01\n\"\"\"\ndef plotDataSet(filename):\n # 导入数据\n datMat = np.mat(loadDataSet(filename))\n # 进行k-means算法其中k为4\n myCentroids, clustAssing = kMeans(datMat, 4)\n clustAssing = clustAssing.tolist()\n myCentroids = myCentroids.tolist()\n xcord = [[], [], [], []]\n ycord = [[], [], [], []]\n datMat = datMat.tolist()\n m = len(clustAssing)\n for i in range(m):\n if int(clustAssing[i][0]) == 0:\n xcord[0].append(datMat[i][0])\n ycord[0].append(datMat[i][1])\n elif int(clustAssing[i][0]) == 1:\n xcord[1].append(datMat[i][0])\n ycord[1].append(datMat[i][1])\n elif int(clustAssing[i][0]) == 2:\n xcord[2].append(datMat[i][0])\n ycord[2].append(datMat[i][1])\n elif 
int(clustAssing[i][0]) == 3:\n xcord[3].append(datMat[i][0])\n ycord[3].append(datMat[i][1])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # 绘制样本点\n ax.scatter(xcord[0], ycord[0], s=20, c='b', marker='*', alpha=.5)\n ax.scatter(xcord[1], ycord[1], s=20, c='r', marker='D', alpha=.5)\n ax.scatter(xcord[2], ycord[2], s=20, c='c', marker='>', alpha=.5)\n ax.scatter(xcord[3], ycord[3], s=20, c='k', marker='o', alpha=.5)\n # 绘制质心\n ax.scatter(myCentroids[0][0], myCentroids[0][1], s=100, c='k', marker='+', alpha=.5)\n ax.scatter(myCentroids[1][0], myCentroids[1][1], s=100, c='k', marker='+', alpha=.5)\n ax.scatter(myCentroids[2][0], myCentroids[2][1], s=100, c='k', marker='+', alpha=.5)\n ax.scatter(myCentroids[3][0], myCentroids[3][1], s=100, c='k', marker='+', alpha=.5)\n plt.title('DataSet')\n plt.xlabel('X')\n plt.show()\n\n\nif __name__ == '__main__':\n plotDataSet('testSet.txt')\n "
] | [
[
"numpy.max",
"numpy.random.rand",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.nonzero",
"numpy.power",
"matplotlib.pyplot.show"
]
] |