repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
kblancato/theia-net
[ "cdb912e1b35701f22928e084913e004352a6fe95" ]
[ "theia-net/classification/modules/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass CNN(torch.nn.Module):\n \"\"\"\n 1D CNN model architecture.\n \n Attributes\n ----------\n num_in : int\n Exposure in seconds.\n \n n_classes : int\n Number of stellar evolutionary states to classify.\n \n log : _io.TextIOWrapper\n Log file.\n \n kernel1, kernel2 : int\n Kernel width of first and second convolution, respectively.\n \n stride1, stride2 : int\n Stride of first and second convolution, respectively.\n \n padding1, padding2 : int\n Zero-padding of first and second convolution, respectively.\n \n dropout : float\n Dropout probability applied to fully-connected part of network.\n \n hidden1, hidden2, hidden3 : int\n Number of hidden units in the first, second, and third fully-connected\n layers, respectively.\n \n \n Methods\n -------\n forward(x, s)\n Forward pass through the model architecture.\n \"\"\"\n def __init__(self, num_in, n_classes, log, kernel1, kernel2, stride1, \\\n stride2, padding1, padding2, dropout, hidden1=2048, \\\n hidden2=1024, hidden3=256):\n \n super(CNN, self).__init__()\n \n self.log = log\n self.dropout = torch.nn.Dropout(p=dropout)\n self.num_in = num_in\n self.n_classes = n_classes\n print(self.num_in, file=log)\n\n OUT_CHANNELS_1 = 64\n dilation1 = 1\n poolsize1 = 4\n \n OUT_CHANNELS_2 = 16\n dilation2 = 1\n poolsize2 = 2\n \n # first convolutional layer\n self.conv1 = nn.Conv1d(in_channels=1,\n out_channels=OUT_CHANNELS_1,\n kernel_size=kernel1,\n dilation=dilation1,\n stride=stride1,\n padding=padding1)\n self.num_out = ((self.num_in+2*padding1-dilation1* \\\n (kernel1-1)-1)/stride1)+1\n assert str(self.num_out)[-1] == '0'\n print(self.num_out, file=log)\n self.bn1 = nn.BatchNorm1d(num_features=OUT_CHANNELS_1)\n self.pool1 = nn.AvgPool1d(kernel_size=poolsize1)\n self.num_out = (self.num_out/poolsize1)\n assert str(self.num_out)[-1] == '0'\n print(self.num_out, file=log)\n \n # second convolutional layer\n self.conv2 = nn.Conv1d(in_channels=OUT_CHANNELS_1,\n out_channels=OUT_CHANNELS_2,\n kernel_size=kernel2,\n stride=stride2,\n padding=padding2)\n self.num_out = ((self.num_out+2*padding2-dilation2* \\\n (kernel2-1)-1)/stride2)+1\n assert str(self.num_out)[-1] == '0'\n print(self.num_out, file=log)\n self.bn2 = nn.BatchNorm1d(num_features=OUT_CHANNELS_2)\n self.pool2 = nn.AvgPool1d(kernel_size=poolsize2)\n self.num_out = (self.num_out/poolsize2)\n assert str(self.num_out)[-1] == '0'\n print(self.num_out, file=log)\n \n # fully-connected layers\n self.num_out = OUT_CHANNELS_2*self.num_out\n assert str(self.num_out)[-1] == '0'\n print(self.num_out, file=log)\n self.num_out = int(self.num_out)\n \n self.linear1 = nn.Linear(self.num_out+1, hidden1)\n self.linear2 = nn.Linear(hidden1, hidden2)\n self.linear3 = nn.Linear(hidden2, hidden3)\n\n # output prediction\n self.predict = nn.Linear(hidden3, self.n_classes)\n \n\n def forward(self, x, s):\n \"\"\"\n Forward pass through the model architecture.\n \n Parameters\n ----------\n x : array_like\n Input time series data.\n \n s : array_like\n Standard deviation array.\n \n Returns\n ----------\n x : array_like\n Output prediction.\n \"\"\"\n x = self.bn1(self.pool1(F.relu(self.conv1(x))))\n x = self.bn2(self.pool2(F.relu(self.conv2(x))))\n \n x = x.view(-1, self.num_out)\n x = torch.cat((x, s), 1)\n\n x = self.dropout(F.tanh(self.linear1(x)))\n x = self.dropout(F.tanh(self.linear2(x)))\n x = F.relu(self.linear3(x))\n x = self.predict(x)\n \n return x\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.cat", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.AvgPool1d" ] ]
kangzhiq/sunpy
[ "fad034a2ca0bebfa041e47b18a0789d2bc4b4aa6" ]
[ "sunpy/timeseries/tests/test_timeseries_factory.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 23 12:08:21 2016\n\n@author: alex_\n\"\"\"\n\nimport os\nimport glob\nimport pytest\nimport datetime\nimport numpy as np\nfrom pandas import DataFrame\nfrom collections import OrderedDict\n\nimport sunpy.data.test\nimport sunpy.timeseries\nfrom sunpy.util.metadata import MetaDict\nimport sunpy.io\nfrom sunpy.util.datatype_factory_base import NoMatchError\nfrom sunpy.util import SunpyUserWarning\n\nimport astropy.units as u\nfrom astropy.table import Table\nfrom sunpy.time import parse_time\nfrom astropy.time import TimeDelta\nfrom astropy.io import fits\n\n# ==============================================================================\n# TimeSeries Factory Tests\n# ==============================================================================\n\nfilepath = sunpy.data.test.rootdir\neve_filepath = os.path.join(filepath, 'EVE_L0CS_DIODES_1m_truncated.txt')\nesp_filepath = os.path.join(filepath, 'eve_l1_esp_2011046_00_truncated.fits')\nfermi_gbm_filepath = os.path.join(filepath, 'gbm.fits')\nnorh_filepath = os.path.join(filepath, 'tca110810_truncated')\nlyra_filepath = os.path.join(filepath, 'lyra_20150101-000000_lev3_std_truncated.fits.gz')\nrhessi_filepath = os.path.join(filepath, 'hsi_obssumm_20120601_018_truncated.fits.gz')\nnoaa_ind_filepath = os.path.join(filepath, 'RecentIndices_truncated.txt')\nnoaa_pre_filepath = os.path.join(filepath, 'predicted-sunspot-radio-flux_truncated.txt')\ngoes_filepath_com = os.path.join(filepath, 'go1520120601.fits.gz')\ngoes_filepath = os.path.join(filepath, 'go1520110607.fits')\na_list_of_many = glob.glob(os.path.join(filepath, \"eve\", \"*\"))\n\n# ==============================================================================\n# Multi file Tests\n# ==============================================================================\n\n\nclass TestTimeSeries(object):\n def test_factory_concatenate_same_source(self):\n # Test making a TimeSeries that is the concatenation of multiple files\n ts_from_list = sunpy.timeseries.TimeSeries(a_list_of_many, source='EVE', concatenate=True)\n assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n ts_from_folder = sunpy.timeseries.TimeSeries(os.path.join(filepath, \"eve\"), source='EVE', concatenate=True)\n assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n # text the two methods get identical dataframes\n assert ts_from_list == ts_from_folder\n # test the frames have correct headings/keys (correct concatenation axis)\n ts_from_list.columns == sunpy.timeseries.TimeSeries(a_list_of_many[0], source='EVE', concatenate=True).columns\n\n def test_factory_concatenate_different_source(self):\n # Test making a TimeSeries that is the concatenation of multiple files\n ts_from_list = sunpy.timeseries.TimeSeries(a_list_of_many, source='EVE', concatenate=True)\n assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n ts_from_folder = sunpy.timeseries.TimeSeries(os.path.join(filepath, \"eve\"), source='EVE', concatenate=True)\n assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n # text the two methods get identical dataframes\n assert ts_from_list == ts_from_folder\n # test the frames have correct headings/keys (correct concatenation axis)\n ts_from_list.columns == sunpy.timeseries.TimeSeries(a_list_of_many[0], source='EVE', concatenate=True).columns\n\n def test_factory_generate_list_of_ts(self):\n # Test making a list TimeSeries from multiple files\n ts_list = 
sunpy.timeseries.TimeSeries(a_list_of_many, source='EVE')\n assert isinstance(ts_list, list)\n for ts in ts_list:\n assert isinstance(ts, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n\n def test_factory_generate_from_glob(self):\n # Test making a TimeSeries from a glob\n ts_from_glob = sunpy.timeseries.TimeSeries(os.path.join(filepath, \"eve\", \"*\"), source='EVE', concatenate=True)\n assert isinstance(ts_from_glob, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n\n#==============================================================================\n# Individual Implicit Source Tests\n#==============================================================================\n\n def test_implicit_fermi_gbm(self):\n # Test a GBMSummary TimeSeries\n with pytest.warns(UserWarning, match='Discarding nonzero nanoseconds'):\n ts_gbm = sunpy.timeseries.TimeSeries(fermi_gbm_filepath)\n assert isinstance(ts_gbm, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)\n\n def test_implicit_norh(self):\n # Test a NoRH TimeSeries\n ts_norh = sunpy.timeseries.TimeSeries(norh_filepath)\n assert isinstance(ts_norh, sunpy.timeseries.sources.norh.NoRHTimeSeries)\n\n def test_implicit_goes(self):\n # Test a GOES TimeSeries\n ts_goes = sunpy.timeseries.TimeSeries(goes_filepath)\n assert isinstance(ts_goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n\n def test_implicit_goes_com(self):\n # Test a GOES TimeSeries\n ts_goes = sunpy.timeseries.TimeSeries(goes_filepath_com)\n assert isinstance(ts_goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n\n def test_implicit_lyra(self):\n # Test a LYRA TimeSeries\n ts_lyra = sunpy.timeseries.TimeSeries(lyra_filepath)\n assert isinstance(ts_lyra, sunpy.timeseries.sources.lyra.LYRATimeSeries)\n\n def test_implicit_rhessi(self):\n # Test a RHESSI TimeSeries\n ts_rhessi = sunpy.timeseries.TimeSeries(rhessi_filepath)\n assert isinstance(ts_rhessi, sunpy.timeseries.sources.rhessi.RHESSISummaryTimeSeries)\n\n def test_implicit_esp(self):\n # Test an ESP TimeSeries\n ts_esp = sunpy.timeseries.TimeSeries(esp_filepath)\n assert isinstance(ts_esp, sunpy.timeseries.sources.eve.ESPTimeSeries)\n\n#==============================================================================\n# Individual Explicit Sources Tests\n#==============================================================================\n\n def test_eve(self):\n #Test an EVE TimeSeries\n ts_eve = sunpy.timeseries.TimeSeries(eve_filepath, source='EVE')\n assert isinstance(ts_eve, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)\n\n def test_esp(self):\n #Test an ESP TimeSeries\n ts_esp = sunpy.timeseries.TimeSeries(esp_filepath, source='ESP')\n assert isinstance(ts_esp, sunpy.timeseries.sources.eve.ESPTimeSeries)\n\n def test_fermi_gbm(self):\n #Test a GBMSummary TimeSeries\n with pytest.warns(UserWarning, match='Discarding nonzero nanoseconds'):\n ts_gbm = sunpy.timeseries.TimeSeries(fermi_gbm_filepath, source='GBMSummary')\n assert isinstance(ts_gbm, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)\n\n def test_norh(self):\n #Test a NoRH TimeSeries\n ts_norh = sunpy.timeseries.TimeSeries(norh_filepath, source='NoRH')\n assert isinstance(ts_norh, sunpy.timeseries.sources.norh.NoRHTimeSeries)\n\n def test_goes(self):\n #Test a GOES TimeSeries\n ts_goes = sunpy.timeseries.TimeSeries(goes_filepath, source='XRS')\n assert isinstance(ts_goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n\n def test_goes_com(self):\n #Test a GOES TimeSeries\n ts_goes = sunpy.timeseries.TimeSeries(goes_filepath_com, source='XRS')\n assert 
isinstance(ts_goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n\n def test_lyra(self):\n #Test a LYRA TimeSeries\n ts_lyra = sunpy.timeseries.TimeSeries(lyra_filepath, source='LYRA')\n assert isinstance(ts_lyra, sunpy.timeseries.sources.lyra.LYRATimeSeries)\n\n def test_rhessi(self):\n #Test a RHESSI TimeSeries\n ts_rhessi = sunpy.timeseries.TimeSeries(rhessi_filepath, source='RHESSI')\n assert isinstance(ts_rhessi, sunpy.timeseries.sources.rhessi.RHESSISummaryTimeSeries)\n\n def test_noaa_ind(self):\n #Test a NOAAPredictIndices TimeSeries\n ts_noaa_ind = sunpy.timeseries.TimeSeries(noaa_ind_filepath, source='NOAAIndices')\n assert isinstance(ts_noaa_ind, sunpy.timeseries.sources.noaa.NOAAIndicesTimeSeries)\n\n def test_noaa_pre(self):\n #Test a NOAAIndices TimeSeries\n ts_noaa_pre = sunpy.timeseries.TimeSeries(noaa_pre_filepath, source='NOAAPredictIndices')\n assert isinstance(ts_noaa_pre, sunpy.timeseries.sources.noaa.NOAAPredictIndicesTimeSeries)\n\n# ==============================================================================\n# Remote Sources Tests\n# ==============================================================================\n\n @pytest.mark.remote_data\n def test_goes_remote(self):\n # Older format file\n goes = sunpy.timeseries.TimeSeries(\n 'https://umbra.nascom.nasa.gov/goes/fits/1986/go06860129.fits')\n assert isinstance(goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n # Newer format\n goes = sunpy.timeseries.TimeSeries(\n 'https://umbra.nascom.nasa.gov/goes/fits/2018/go1520180626.fits')\n assert isinstance(goes, sunpy.timeseries.sources.goes.XRSTimeSeries)\n\n#==============================================================================\n# Manual TimeSeries Tests\n#==============================================================================\n\n def test_meta_from_fits_header(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24*60)*u.minute)\n intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n data = DataFrame(intensity, index=times, columns=['intensity'])\n\n # Use a FITS file HDU using sunpy.io\n hdulist = sunpy.io.read_file(goes_filepath)\n meta = hdulist[0].header\n meta_md = MetaDict(OrderedDict(meta))\n ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta)\n ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md)\n assert ts_hdu_meta == ts_md_meta\n\n # Use a FITS file HDU using astropy.io\n hdulist = fits.open(goes_filepath)\n meta = hdulist[0].header\n hdulist.close()\n meta_md = MetaDict(sunpy.io.header.FileHeader(meta))\n ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta)\n ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md)\n assert ts_hdu_meta == ts_md_meta\n\n def test_generic_construction_basic(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n\n # Create the data DataFrame, header MetaDict and units OrderedDict\n data = DataFrame(intensity, index=times, columns=['intensity'])\n units = OrderedDict([('intensity', u.W/u.m**2)])\n meta = MetaDict({'key':'value'})\n\n # Create normal TS from dataframe and check\n ts_generic = sunpy.timeseries.TimeSeries(data, meta, units)\n assert isinstance(ts_generic, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n assert ts_generic.columns == ['intensity']\n assert ts_generic.units == units\n assert 
ts_generic.meta.metadata[0][2] == meta\n\n # Create TS using a tuple of values\n ts_tuple = sunpy.timeseries.TimeSeries(((data, meta, units),))\n assert isinstance(ts_tuple, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n assert ts_generic == ts_tuple\n\n\n def test_generic_construction_basic_omitted_details(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n\n # Create the data DataFrame, header MetaDict and units OrderedDict\n data = DataFrame(intensity, index=times, columns=['intensity'])\n units = OrderedDict([('intensity', u.W/u.m**2)])\n meta = MetaDict({'key':'value'})\n\n # Create TS omitting units input arguments\n ts_1 = sunpy.timeseries.TimeSeries(data, meta)\n assert isinstance(ts_1, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n assert ts_1.columns == ['intensity']\n assert ts_1.units == OrderedDict([('intensity', u.dimensionless_unscaled)])\n assert ts_1.meta.metadata[0][2] == meta\n\n ts_2 = sunpy.timeseries.TimeSeries(data, units)\n assert isinstance(ts_2, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n assert ts_2.columns == ['intensity']\n assert ts_2.units == units\n assert ts_2.meta.metadata[0][2] == MetaDict()\n\n def test_generic_construction_basic_different_meta_types(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n\n # Create the data DataFrame, header MetaDict and units OrderedDict\n data = DataFrame(intensity, index=times, columns=['intensity'])\n units = OrderedDict([('intensity', u.W/u.m**2)])\n meta_md = MetaDict({'key':'value'})\n meta_di = {'key':'value'}\n meta_od = OrderedDict({'key':'value'})\n\n # Create TS using different dictionary meta types\n ts_md = sunpy.timeseries.TimeSeries(data, meta_md, units)\n ts_di = sunpy.timeseries.TimeSeries(data, meta_di, units)\n ts_od = sunpy.timeseries.TimeSeries(data, meta_od, units)\n assert ts_md == ts_di == ts_od\n assert ts_md.meta.metadata[0][2] == ts_di.meta.metadata[0][2] == ts_od.meta.metadata[0][2]\n\n def test_generic_construction_ts_list(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity1 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n intensity2 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n\n # Create the data DataFrame, header MetaDict and units OrderedDict\n data = DataFrame(intensity1, index=times, columns=['intensity'])\n data2 = DataFrame(intensity2, index=times, columns=['intensity2'])\n units = OrderedDict([('intensity', u.W/u.m**2)])\n units2 = OrderedDict([('intensity2', u.W/u.m**2)])\n meta = MetaDict({'key': 'value'})\n meta2 = MetaDict({'key2': 'value2'})\n\n # Create TS individually\n ts_1 = sunpy.timeseries.TimeSeries(data, meta, units)\n ts_2 = sunpy.timeseries.TimeSeries(data2, meta2, units2)\n\n # Create TS list using\n ts_list = sunpy.timeseries.TimeSeries(data, meta, units, data2, meta2, units2)\n assert isinstance(ts_list, list)\n assert len(ts_list) == 2\n assert ts_list[0] == ts_1\n assert ts_list[1] == ts_2\n\n # Create TS using a tuple\n ts_list2 = sunpy.timeseries.TimeSeries(((data, meta, units),(data2, meta2, units2)))\n assert ts_list == ts_list2\n\n 
def test_generic_construction_concatenation(self):\n # Generate the data and the corrisponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity1 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n intensity2 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))\n\n # Create the data DataFrame, header MetaDict and units OrderedDict\n data = DataFrame(intensity1, index=times, columns=['intensity'])\n data2 = DataFrame(intensity2, index=times, columns=['intensity2'])\n units = OrderedDict([('intensity', u.W/u.m**2)])\n units2 = OrderedDict([('intensity2', u.W/u.m**2)])\n meta = MetaDict({'key':'value'})\n meta2 = MetaDict({'key2':'value2'})\n\n # Create TS individually\n ts_1 = sunpy.timeseries.TimeSeries(data, meta, units)\n ts_2 = sunpy.timeseries.TimeSeries(data2, meta2, units2)\n ts_concat_1 = ts_1.concatenate(ts_2)\n\n # Concatinate during construction\n ts_concat_2 = sunpy.timeseries.TimeSeries(data, meta, units, data2, meta2, units2, concatenate=True)\n assert isinstance(ts_concat_2, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n\n # Create TS using a tuple\n ts_concat_3 = sunpy.timeseries.TimeSeries(((data, meta, units),(data2, meta2, units2)), concatenate=True)\n assert isinstance(ts_concat_3, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n assert ts_concat_1 == ts_concat_2 == ts_concat_3\n\n def test_table_to_ts(self):\n # Generate the data and the corresponding dates\n base = parse_time(datetime.datetime.today())\n times = base - TimeDelta(np.arange(24 * 60)*u.minute)\n intensity = u.Quantity(np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60)))), u.W/u.m**2)\n\n # Create the units and meta objects\n units = OrderedDict([('intensity', u.W/u.m**2)])\n meta = MetaDict({'key':'value'})\n tbl_meta = MetaDict({'t_key':'t_value'})\n\n # Create a suitable mixin qtable\n table = Table([times, intensity], names=['time', 'intensity'], meta=tbl_meta)\n table.add_index('time')\n\n # Create TS from table and check\n ts_table = sunpy.timeseries.TimeSeries(table, meta, units)\n assert isinstance(ts_table, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n ts_table2 = sunpy.timeseries.TimeSeries(table, units, meta)\n assert (ts_table2 == ts_table)\n\n # Create TS using a tuple of values\n ts_table3 = sunpy.timeseries.TimeSeries((table, meta, units))\n assert isinstance(ts_table3, sunpy.timeseries.timeseriesbase.GenericTimeSeries)\n\n # ToDo: Try an incompatible table\n dual_index_table = Table([times, intensity], names=['time', 'intensity'], meta=tbl_meta)\n dual_index_table.add_index(('time', 'intensity'))\n with pytest.raises(ValueError):\n sunpy.timeseries.TimeSeries((dual_index_table, meta, units))\n\n# ==============================================================================\n# Test some other options\n# ==============================================================================\n\n def test_passed_ts(self):\n # Test an EVE TimeSeries\n with pytest.warns(SunpyUserWarning, match='Unknown units'):\n ts_eve = sunpy.timeseries.TimeSeries(eve_filepath, source='EVE')\n ts_from_ts_1 = sunpy.timeseries.TimeSeries(ts_eve, source='EVE')\n ts_from_ts_2 = sunpy.timeseries.TimeSeries(ts_eve)\n assert ts_eve == ts_from_ts_1 == ts_from_ts_2\n\n#==============================================================================\n# Test some Errors\n#==============================================================================\n\n def test_invalid_manual_data(self):\n meta = 
MetaDict({'key':'value'})\n data = []\n with pytest.raises(NoMatchError):\n sunpy.timeseries.TimeSeries(data, meta)\n\n def test_invalid_filepath(self):\n invalid_filepath = os.path.join(filepath, 'invalid_filepath_here')\n with pytest.raises(NoMatchError):\n sunpy.timeseries.TimeSeries(invalid_filepath)\n # Now with silence_errors kwarg set\n with pytest.raises(NoMatchError):\n sunpy.timeseries.TimeSeries(invalid_filepath, silence_errors=True)\n\n def test_invalid_file(self):\n invalid_filepath = os.path.join(filepath, 'annotation_ppt.db')\n with pytest.raises(TypeError):\n sunpy.timeseries.TimeSeries(invalid_filepath)\n # Now with silence_errors kwarg set\n with pytest.raises(TypeError):\n sunpy.timeseries.TimeSeries(invalid_filepath, silence_errors=True)\n\n def test_validate_units(self):\n valid_units = OrderedDict([('Watt Per Meter Squared', u.Unit(\"W / m2\")), ('Meter Cubed', u.Unit(\"m3\"))])\n assert sunpy.timeseries.TimeSeries._validate_units(valid_units)\n # Test for not having only units for values\n invalid_units_1 = OrderedDict([('Watt Per Meter Squared', 'string'), ('Meter Cubed', u.Unit(\"m3\"))])\n assert not sunpy.timeseries.TimeSeries._validate_units(invalid_units_1)\n # Test for being a MetaDict object\n invalid_units_2 = MetaDict(OrderedDict([('Watt Per Meter Squared', u.Unit(\"W / m2\")), ('Meter Cubed', u.Unit(\"m3\"))]))\n assert not sunpy.timeseries.TimeSeries._validate_units(invalid_units_2)\n\n def test_validate_meta_basic(self):\n valid_meta_1 = MetaDict({'key':'value'})\n assert sunpy.timeseries.TimeSeries._validate_meta(valid_meta_1)\n valid_meta_2 = OrderedDict({'key':'value'})\n assert sunpy.timeseries.TimeSeries._validate_meta(valid_meta_2)\n invalid_meta = []\n assert not sunpy.timeseries.TimeSeries._validate_meta(invalid_meta)\n\n def test_validate_meta_astropy_header(self):\n # Manually open a goes file for the sunpy.io.header.FileHeader test\n hdus = sunpy.io.read_file(goes_filepath)\n header = hdus[0].header\n assert sunpy.timeseries.TimeSeries._validate_meta(header)\n # Manually open a goes file for the astropy.io.fits.header.Header test\n hdulist = fits.open(goes_filepath)\n header = hdulist[0].header\n hdulist.close()\n assert sunpy.timeseries.TimeSeries._validate_meta(header)\n" ]
[ [ "numpy.arange", "pandas.DataFrame" ] ]
skyduy/zfverify
[ "a49e314df7b8b3822dd941b44d68c0fde77df9c9" ]
[ "Verify-Manual-python/predict/predictOneVsAll.py" ]
[ "# coding: utf-8\nfrom numpy import dot, hstack, ones, argmax\nfrom sigmoid import sigmoid\n\n\ndef predictOneVsAll(all_theta, X):\n m = X.shape[0]\n\n X = hstack((ones((m, 1)), X))\n\n real_all_theta = all_theta.transpose()\n all_predict = sigmoid(dot(X, real_all_theta))\n\n Accuracy = all_predict.max(1)\n p = argmax(all_predict, axis=1)\n\n return Accuracy, p\n\n" ]
[ [ "numpy.dot", "numpy.argmax", "numpy.ones" ] ]
middlec000/wordler
[ "ad76dea50f0baab398d16366bfc55557ae187fce" ]
[ "src/main.py" ]
[ "import streamlit as st\nimport pandas as pd\nfrom helper_methods import *\n\n\ndef suggest(df: pd.DataFrame, original_length: int, num_words_to_display: int, sort_by: str) -> None:\n \"\"\"\n Print the suggested words nicely and ordered by the desired metric.\n\n Args:\n df (pd.DataFrame): Remaining words.\n original_length (int): Original number of words.\n num_words_to_display (int): Number of words to display.\n sort_by (str): How suggestions should be sorted. Supported options:\n 'Word Frequency', \n 'Letter Frequency', \n 'Letter at Position Frequency'\n \"\"\"\n sort_by_to_col_map = {'Word Frequency': 'wordFreq', 'Letter Frequency': 'letterFreqSum', 'Letter at Position Frequency': 'letterPosFreqSum'}\n sort_by_col = sort_by_to_col_map[sort_by]\n df = df.sort_values(by=sort_by_col, ascending=False)\n st.write(f'Words Remaining: {len(df)} ({len(df)*100/original_length:.2f}%)')\n for i in range(min(num_words_to_display, len(df))):\n st.markdown(f\"<div style='text-align: center'> {df.iloc[i].name} </div>\", unsafe_allow_html=True)\n return\n\n\ndef main():\n # Set Page Configuration\n st.set_page_config(\n initial_sidebar_state='collapsed'\n )\n num_words_to_display = int(st.sidebar.number_input(label='Number of words to suggest', min_value=0, value=10))\n remove_previous_words = st.sidebar.radio(label='Remove previously used Wordle words?', options=['Yes', 'No'], index=1) == 'Yes'\n sort_by = st.sidebar.radio(label='Sort suggested words (high to low) by', options=['Word Frequency', 'Letter Frequency', 'Letter at Position Frequency'], index=1)\n\n # Get Data\n datapath = 'Data-Preprocessed/word_freq.csv'\n # datapath = 'https://raw.githubusercontent.com/middlec000/wordler/main/Data-Preprocessed/word_freq_wordle_only.csv'\n data = pd.read_csv(datapath).sort_values(by='wordFreq', ascending=False).set_index('word')\n original_length = len(data)\n \n # User Instructions\n st.write('# The Wordler')\n st.write('## Here to help you win Wordle!')\n instructions = st.expander(\"HELP\")\n with instructions:\n st.write('Wordle website: [https://www.nytimes.com/games/wordle/index.html](https://www.nytimes.com/games/wordle/index.html)')\n st.write('Enter Wordle feedback according to the following mapping:\\n* Gray -> 0\\n* Yellow -> 1\\n* Green -> 2')\n st.write('Example Entry: hello-10112')\n st.write('Enter each of your five letter Wordle guesses, a dash (-), then the feedback you get from Wordle in the text boxes below:')\n st.write('Open the sidebar (upper left >) for additional options.')\n st.write('See the GitHub repo for the code and how calculations are performed: \\n[https://github.com/middlec000/wordler](https://github.com/middlec000/wordler)')\n\n st.write('## Enter Guesses and Feedback')\n user_inputs = []\n # Get user input\n for i in range(1,7):\n user_input = st.text_input(label=f'Word{i}-XXXXX',value='', max_chars=11).upper()\n if user_input:\n user_inputs.append(user_input)\n \n st.write('## Words Suggested by Wordler:')\n # Check User Input\n guesses, bad_input = check_convert_input(user_inputs=user_inputs)\n if bad_input:\n st.warning('Input is invalid - please see example above.')\n # Display (Filtered) Words\n if guesses and not bad_input:\n filtered_data = filter_words(guesses=guesses, words=data, remove_previous_wordle_words=remove_previous_words)\n suggest(df=filtered_data, original_length=original_length, num_words_to_display=num_words_to_display, sort_by=sort_by)\n else:\n suggest(df=data, original_length=original_length, num_words_to_display=num_words_to_display, 
sort_by=sort_by)\n return\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_csv" ] ]
Rocketknight1/pytorch-pretrained-BERT
[ "2c03c10d5e34badf17298e8f070c8c0169febe22" ]
[ "pytorch_pretrained_bert/modeling.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom io import open\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME\n\nlogger = logging.getLogger(__name__)\n\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n 'bert-base-german-cased': \"https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-base-cased-finetuned-mrpc': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin\",\n}\nPRETRAINED_CONFIG_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json\",\n 'bert-base-multilingual-uncased': 
\"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json\",\n 'bert-base-german-cased': \"https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-config.json\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json\",\n 'bert-large-cased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json\",\n 'bert-large-uncased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json\",\n 'bert-large-cased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json\",\n 'bert-base-cased-finetuned-mrpc': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json\",\n}\nBERT_CONFIG_NAME = 'bert_config.json'\nTF_WEIGHTS_NAME = 'model.ckpt'\n\ndef prune_linear_layer(layer, index, dim=0):\n \"\"\" Prune a linear layer (a model parameters) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()\n else:\n b = layer.bias[index].clone().detach()\n new_size = list(layer.weight.size())\n new_size[dim] = len(index)\n new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)\n new_layer.weight.requires_grad = False\n new_layer.weight.copy_(W.contiguous())\n new_layer.weight.requires_grad = True\n if layer.bias is not None:\n new_layer.bias.requires_grad = False\n new_layer.bias.copy_(b.contiguous())\n new_layer.bias.requires_grad = True\n return new_layer\n\n\ndef load_tf_weights_in_bert(model, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. 
If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n layer_norm_eps: The epsilon used by LayerNorm.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\" Save this instance to a json file.\"\"\"\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n logger.info(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\")\n class BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x 
- u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = output_attentions\n self.keep_multihead_output = keep_multihead_output\n self.multihead_output = None\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = 
nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n if self.keep_multihead_output:\n self.multihead_output = context_layer\n self.multihead_output.retain_grad()\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n if self.output_attentions:\n return attention_probs, context_layer\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertAttention, self).__init__()\n self.output_attentions = output_attentions\n self.self = BertSelfAttention(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.output = BertSelfOutput(config)\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n for head in heads:\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n # Update hyper params\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n\n def forward(self, input_tensor, attention_mask, head_mask=None):\n self_output = self.self(input_tensor, attention_mask, head_mask)\n if self.output_attentions:\n attentions, self_output = self_output\n attention_output = self.output(self_output, input_tensor)\n if self.output_attentions:\n return attentions, attention_output\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, 
config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertLayer, self).__init__()\n self.output_attentions = output_attentions\n self.attention = BertAttention(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n attention_output = self.attention(hidden_states, attention_mask, head_mask)\n if self.output_attentions:\n attentions, attention_output = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if self.output_attentions:\n return attentions, layer_output\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertEncoder, self).__init__()\n self.output_attentions = output_attentions\n self.layer = nn.ModuleList([BertLayer(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output) for _ in\n range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):\n all_encoder_layers = []\n all_attentions = []\n for i, layer_module in enumerate(self.layer):\n hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])\n if self.output_attentions:\n attentions, hidden_states = hidden_states\n all_attentions.append(attentions)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if self.output_attentions:\n return all_attentions, all_encoder_layers\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n 
super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(bert_model_embedding_weights.size(1),\n bert_model_embedding_weights.size(0),\n bias=False)\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n def __init__(self, config, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. \"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n \"\"\"\n Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name_or_path: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-large-cased`\n . `bert-base-multilingual-uncased`\n . `bert-base-multilingual-cased`\n . `bert-base-chinese`\n . 
`bert-base-german-cased`\n . `bert-large-uncased-whole-word-masking`\n . `bert-large-cased-whole-word-masking`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `model.chkpt` a TensorFlow checkpoint\n from_tf: should we load the weights from a locally saved TensorFlow checkpoint\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n state_dict = kwargs.get('state_dict', None)\n kwargs.pop('state_dict', None)\n cache_dir = kwargs.get('cache_dir', None)\n kwargs.pop('cache_dir', None)\n from_tf = kwargs.get('from_tf', False)\n kwargs.pop('from_tf', None)\n\n if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]\n config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME)\n config_file = os.path.join(pretrained_model_name_or_path, BERT_CONFIG_NAME)\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:\n logger.error(\n \"Couldn't reach server at '{}' to download pretrained weights.\".format(\n archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n try:\n resolved_config_file = cached_path(config_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP:\n logger.error(\n \"Couldn't reach server at '{}' to download pretrained model configuration file.\".format(\n config_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path,\n ', '.join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()),\n config_file))\n return None\n if resolved_archive_file == archive_file and resolved_config_file == config_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n logger.info(\"loading configuration file {}\".format(config_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n logger.info(\"loading configuration file {} from cache at {}\".format(\n config_file, resolved_config_file))\n # Load config\n config = BertConfig.from_json_file(resolved_config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location='cpu')\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return load_tf_weights_in_bert(model, resolved_archive_file)\n # Load from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n start_prefix = ''\n if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):\n start_prefix = 'bert.'\n load(model, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if len(error_msgs) > 0:\n raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n return model\n\n\nclass BertModel(BertPreTrainedModel):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. 
Default: False\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.\n\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controlled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLS`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertModel, self).__init__(config)\n self.output_attentions = output_attentions\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n\n def prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def get_multihead_outputs(self):\n \"\"\" Gather all multi-head outputs.\n Return: list (layers) of multihead module outputs with gradients\n \"\"\"\n return [layer.attention.self.multihead_output for 
layer in self.encoder.layer]\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, head_mask=None):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is simpler than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicates we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n head_mask=head_mask)\n if self.output_attentions:\n all_attentions, encoded_layers = encoded_layers\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n if self.output_attentions:\n return all_attentions, encoded_layers, pooled_output\n return encoded_layers, pooled_output\n\n\nclass BertForPreTraining(BertPreTrainedModel):\n \"\"\"BERT model with pre-training heads.\n This module comprises the BERT model followed by the two pre-training heads:\n - the masked language modeling head, and\n - the next sentence classification head.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. 
Default: False\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `masked_lm_labels` and `next_sentence_label` are not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `masked_lm_labels` or `next_sentence_label` is `None`:\n Outputs a tuple comprising\n - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and\n - the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForPreTraining(config)\n masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertForPreTraining, self).__init__(config)\n self.output_attentions = output_attentions\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False, head_mask=head_mask)\n if self.output_attentions:\n all_attentions, sequence_output, pooled_output = outputs\n else:\n sequence_output, pooled_output = outputs\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n if 
masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n return total_loss\n elif self.output_attentions:\n return all_attentions, prediction_scores, seq_relationship_score\n return prediction_scores, seq_relationship_score\n\n\nclass BertForMaskedLM(BertPreTrainedModel):\n \"\"\"BERT model with the masked language modeling head.\n This module comprises the BERT model followed by the masked language modeling head.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 
1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `masked_lm_labels` is not `None`:\n Outputs the masked language modeling loss.\n if `masked_lm_labels` is `None`:\n Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForMaskedLM(config)\n masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertForMaskedLM, self).__init__(config)\n self.output_attentions = output_attentions\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False,\n head_mask=head_mask)\n if self.output_attentions:\n all_attentions, sequence_output, _ = outputs\n else:\n sequence_output, _ = outputs\n prediction_scores = self.cls(sequence_output)\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n return masked_lm_loss\n elif self.output_attentions:\n return all_attentions, prediction_scores\n return prediction_scores\n\n\nclass BertForNextSentencePrediction(BertPreTrainedModel):\n \"\"\"BERT model with next sentence prediction head.\n This module comprises the BERT model followed by the next sentence classification head.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `next_sentence_label` is not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `next_sentence_label` is `None`:\n Outputs the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForNextSentencePrediction(config)\n seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertForNextSentencePrediction, self).__init__(config)\n self.output_attentions = output_attentions\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.cls = BertOnlyNSPHead(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False,\n head_mask=head_mask)\n if self.output_attentions:\n all_attentions, _, pooled_output = outputs\n else:\n _, pooled_output = outputs\n seq_relationship_score = self.cls(pooled_output)\n\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n return next_sentence_loss\n elif self.output_attentions:\n return all_attentions, seq_relationship_score\n return seq_relationship_score\n\n\nclass BertForSequenceClassification(BertPreTrainedModel):\n \"\"\"BERT model for classification.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary. Items in the batch should begin with the special \"CLS\" token. 
(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForSequenceClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):\n super(BertForSequenceClassification, self).__init__(config)\n self.output_attentions = output_attentions\n self.num_labels = num_labels\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, head_mask=head_mask)\n if self.output_attentions:\n all_attentions, _, pooled_output = outputs\n else:\n _, pooled_output = outputs\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n elif self.output_attentions:\n return all_attentions, logits\n return logits\n\n\nclass BertForMultipleChoice(BertPreTrainedModel):\n \"\"\"BERT model for multiple choice tasks.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. 
Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n `num_choices`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`\n and type 1 corresponds to a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_choices].\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])\n input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])\n token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_choices = 2\n\n model = BertForMultipleChoice(config, num_choices)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_choices=2, output_attentions=False, keep_multihead_output=False):\n super(BertForMultipleChoice, self).__init__(config)\n self.output_attentions = output_attentions\n self.num_choices = num_choices\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n outputs = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False, head_mask=head_mask)\n if self.output_attentions:\n all_attentions, _, pooled_output = outputs\n else:\n _, pooled_output = outputs\n pooled_output = self.dropout(pooled_output)\n 
logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, self.num_choices)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n return loss\n elif self.output_attentions:\n return all_attentions, reshaped_logits\n return reshaped_logits\n\n\nclass BertForTokenClassification(BertPreTrainedModel):\n \"\"\"BERT model for token-level classification.\n This module is composed of the BERT model with a linear layer on top of\n the full hidden state of the last layer.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [0, ..., num_labels].\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 
1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, sequence_length, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForTokenClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):\n super(BertForTokenClassification, self).__init__(config)\n self.output_attentions = output_attentions\n self.num_labels = num_labels\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, head_mask=head_mask)\n if self.output_attentions:\n all_attentions, sequence_output, _ = outputs\n else:\n sequence_output, _ = outputs\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n elif self.output_attentions:\n return all_attentions, logits\n return logits\n\n\nclass BertForQuestionAnswering(BertPreTrainedModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model\n `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False\n `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.\n This can be used to compute head importance metrics. Default: False\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.\n It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, output_attentions=False, keep_multihead_output=False):\n super(BertForQuestionAnswering, self).__init__(config)\n self.output_attentions = output_attentions\n self.bert = BertModel(config, output_attentions=output_attentions,\n keep_multihead_output=keep_multihead_output)\n self.qa_outputs = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,\n end_positions=None, head_mask=None):\n outputs = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False,\n head_mask=head_mask)\n if self.output_attentions:\n all_attentions, sequence_output, _ = outputs\n else:\n sequence_output, _ = outputs\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n 
end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n return total_loss\n elif self.output_attentions:\n return all_attentions, start_logits, end_logits\n return start_logits, end_logits\n" ]
[ [ "torch.nn.Softmax", "torch.load", "torch.zeros", "torch.nn.Embedding", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.sqrt", "torch.from_numpy", "torch.arange", "tensorflow.train.list_variables", "torch.ones_like", "torch.sigmoid", "torch.zeros_like", "tensorflow.train.load_variable", "torch.nn.Linear", "numpy.transpose", "torch.nn.Tanh", "torch.matmul" ] ]
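The one non-obvious numerical step in the `BertModel.forward` code above is the attention-mask conversion: a 2D `[batch, seq_len]` padding mask is broadcast to 4D and turned into an additive bias of `-10000.0` on padded positions, so the softmax assigns them near-zero probability. A minimal standalone sketch of just that step (shapes and values here are illustrative, not taken from the file):

```python
import torch

# 2D padding mask: 1 = real token, 0 = padding (batch of 2, max length 4).
attention_mask = torch.tensor([[1, 1, 1, 1],
                               [1, 1, 0, 0]])

# Broadcast to [batch, 1, 1, seq_len] so one mask covers every head and query position.
extended = attention_mask[:, None, None, :].to(torch.float32)

# Flip into an additive bias: 0.0 where we attend, -10000.0 where we mask.
bias = (1.0 - extended) * -10000.0

# Added to raw attention scores, the bias drives masked logits toward -inf,
# so softmax gives those positions ~0 weight.
scores = torch.randn(2, 12, 4, 4)  # [batch, heads, from_seq, to_seq]
probs = torch.softmax(scores + bias, dim=-1)
print(probs[1, 0, 0])  # the two padded positions of the second row get ~0
```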
flo-compbio/xlmhg
[ "c29d913386443396254774b8cff5cff2b5731323" ]
[ "tests/01_algorithms/test_correct_pval.py" ]
[ "# Copyright (c) 2016-2019 Florian Wagner\n#\n# This file is part of XL-mHG.\n\n\"\"\"Tests for the Cython implementations of the XL-mHG p-value.\"\"\"\n\nimport numpy as np\nfrom scipy.stats import hypergeom\n\nfrom xlmhg import mhg, mhg_cython\n\ndef test_cross():\n \"\"\"Compares p-values calculated using PVAL1 and PVAL2.\"\"\"\n N = 50\n K = 10\n\n #tol = 1e-11\n tol = 1e-8\n\n W = N-K\n table = np.empty((K+1, W+1), dtype=np.longdouble)\n\n # calculate hypergeometric p-values for all configurations\n configs = np.ones((K+1, W+1), dtype=np.float64)\n for k in range(1, K+1):\n for w in range(W):\n n = k+w\n configs[k, w] = hypergeom.sf(k-1, N, K, n)\n\n tests = 0\n for X in range(1, N+1):\n for L in range(N, 0, -1):\n # calculate all possible XL-mHG test statistics\n S = np.ones((K+1, W+1), dtype=np.float64)\n for n in range(L+1):\n k = min(K, n)\n w = n-k\n while k >= X and w <= W and n <= L:\n S[k, w] = configs[k, w]\n k -= 1\n w += 1\n\n all_stat = np.sort(np.unique(S.ravel()))[::-1]\n\n for stat in all_stat:\n pval1 = mhg_cython.get_xlmhg_pval1(N, K, X, L, stat, table)\n pval2 = mhg_cython.get_xlmhg_pval2(N, K, X, L, stat, table)\n tests += 1\n assert mhg.is_equal(pval1, pval2, tol=tol)\n\n print('Calculated %d bounds, based on %d configurations.'\n % (tests, configs.size))\n" ]
[ [ "scipy.stats.hypergeom.sf", "numpy.empty", "numpy.ones" ] ]
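The `configs` table that the test above precomputes is a grid of hypergeometric tail probabilities: for a ranked list of length N containing K 1s, the enrichment p-value at cutoff n = k + w with k 1s above the cutoff is P(X >= k) for X ~ hypergeom(N, K, n). A one-entry sketch of that computation (the particular numbers are arbitrary):

```python
from scipy.stats import hypergeom

N, K = 50, 10  # list length, number of 1s in the list
n, k = 20, 6   # cutoff position, number of 1s at or above the cutoff

# P(X >= k): sf(k - 1) is the inclusive upper tail of the hypergeometric
# distribution, matching the test's configs[k, w] = hypergeom.sf(k-1, N, K, n).
pval = hypergeom.sf(k - 1, N, K, n)
print(pval)
```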
guylapid/materialize
[ "8629a120a5a628b6ef06f379b48ba723797db944" ]
[ "demo/http_logs/apps/loadgen.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright Materialize, Inc. and contributors. All rights reserved.\n#\n# Use of this software is governed by the Business Source License\n# included in the LICENSE file at the root of this repository.\n#\n# As of the Change Date specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0.\n\nimport bisect\nimport random\nimport string\nimport sys\nimport time\nfrom collections import namedtuple\nfrom enum import Enum, auto\n\nimport numpy as np\nimport requests\n\n\nclass State(Enum):\n GATEWAY = 0\n SEARCH = auto()\n DETAIL = auto()\n QUIT = auto()\n DO_NOTHING = auto()\n\n\ndef default_sampler():\n return np.random.zipf(1.1) - 1\n\n\ndef gen_items(n):\n names = set()\n alnum = string.ascii_letters + string.digits\n\n def gen_name():\n old_len = len(names)\n x = \"\"\n while len(names) == old_len:\n x = \"\".join(random.choices(alnum, k=8))\n names.add(x)\n assert x\n return x\n\n return [gen_name() for _ in range(n)]\n\n\ndef get_index(items, sampler):\n z = len(items)\n while z >= len(items):\n z = sampler()\n return z\n\n\ndef get_item(items, sampler=default_sampler):\n return items[get_index(items, sampler)]\n\n\ndef next_state(behavior, cur_state):\n cb = behavior[cur_state.value]\n return np.random.choice(State, p=cb / np.sum(cb))\n\n\nDEFAULT_BEHAVIOR = [\n [1, 8, 2, 1, 1], # gateway\n [1, 3, 3, 1, 1], # search\n [1, 5, 2, 1, 1], # detail\n [10, 2, 2, 0, 0], # quit (used for new user)\n]\n\nDEFAULT_URL = \"http://server:5000\"\n\nDEFAULT_ITEMS = gen_items(1000000)\n\nDEFAULT_NEW_USERS_PER_TICK = 100\n\nDEFAULT_TICK_SLEEP_SECONDS = 1\n\nwith open(\"./words.txt\") as f:\n WORDS = f.read().splitlines()\n\n\ndef path_for_state(state):\n if state == State.GATEWAY:\n return \"/\"\n if state == State.SEARCH:\n return \"/search/?kw={}+{}+{}\".format(\n random.choice(WORDS), random.choice(WORDS), random.choice(WORDS)\n )\n if state == State.DETAIL:\n return \"/detail/{}\".format(get_item(DEFAULT_ITEMS))\n assert False # This should not be called for other states\n\n\nclass User:\n def __init__(self, behavior=DEFAULT_BEHAVIOR):\n self.behavior = behavior\n self.state = next_state(self.behavior, State.QUIT)\n self.ip = \"{}.{}.{}.{}\".format(\n random.randint(0, 127),\n random.randint(0, 127),\n random.randint(0, 127),\n random.randint(0, 127),\n )\n\n def take_action(self, base_url=DEFAULT_URL):\n assert self.state != State.QUIT\n if self.state == State.DO_NOTHING:\n self.state = self.old_state\n assert self.state != State.DO_NOTHING\n else:\n self.old_state = self.state\n url = \"{}{}\".format(base_url, path_for_state(self.state))\n # TODO - throughput can be higher if this is made async\n requests.get(url, headers={\"X-Forwarded-For\": self.ip})\n self.state = next_state(self.behavior, self.state)\n\n\nclass Simulation:\n def __init__(self):\n self.users = []\n\n def tick(self):\n time_begin = time.time()\n self.users.extend(\n (User() for _ in range(np.random.poisson(DEFAULT_NEW_USERS_PER_TICK)))\n )\n old_len_users = len(self.users)\n random.shuffle(self.users)\n for u in self.users:\n u.take_action()\n self.users = [u for u in self.users if u.state != State.QUIT]\n elapsed = time.time() - time_begin\n print(\n \"{} users acted in {}s. 
{} quit.\".format(\n old_len_users, elapsed, old_len_users - len(self.users)\n )\n )\n sys.stdout.flush()\n if elapsed < DEFAULT_TICK_SLEEP_SECONDS:\n time.sleep(DEFAULT_TICK_SLEEP_SECONDS - elapsed)\n\n\nif __name__ == \"__main__\":\n s = Simulation()\n while True:\n s.tick()\n" ]
[ [ "numpy.random.poisson", "numpy.sum", "numpy.random.zipf" ] ]
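Two small techniques drive the load generator above: Zipf-distributed item popularity with rejection sampling to stay inside the catalog, and a Markov state transition drawn from a row of integer weights. A condensed sketch of both (the four-state table here is a trimmed, illustrative version of the five-state one in the file):

```python
import numpy as np

# Rejection-sampled Zipf index: resample until the draw fits the catalog.
def zipf_index(n_items, a=1.1):
    z = n_items
    while z >= n_items:
        z = np.random.zipf(a) - 1  # shift so indices start at 0
    return z

# Markov step: each row holds unnormalized transition weights out of one state.
STATES = ["gateway", "search", "detail", "quit"]
WEIGHTS = np.array([
    [1, 8, 2, 1],   # from gateway
    [1, 3, 3, 1],   # from search
    [1, 5, 2, 1],   # from detail
    [10, 2, 2, 0],  # from quit (used to seed a brand-new user)
], dtype=float)

def next_state(current):
    row = WEIGHTS[STATES.index(current)]
    return np.random.choice(STATES, p=row / row.sum())

print(zipf_index(1000), next_state("gateway"))
```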
pints-team/markov-builder
[ "7accb6e7bc64a2a2afa35d4594c3f4dc3284923c" ]
[ "examples/simulation.py" ]
[ "#!/usr/bin/env python3\n\n# Simulate data from the Beattie model and M10 model using a Gillespie\n# algorithm output plots into examples/example_output or\n# MARKOVBUILDER_EXAMPLE_OUTPUT if it exists\n\nimport logging\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom markov_builder import example_models\n\n\ndef main():\n # First define functions which output the values of each transition rate\n # for a given voltage (as dictionaries)\n # Perform the simulations\n mc = example_models.construct_four_state_chain()\n protocol = ((-80, 100), (20, 200))\n SimulateStepProtocol(mc, protocol, name=\"Beattie\")\n\n\ndef SimulateStepProtocol(mc, protocol, name: str = \"\"):\n fig = plt.figure(figsize=(8, 8))\n ax1 = fig.add_subplot(211)\n no_trajectories = 100\n dist = None\n data = [pd.DataFrame(columns=(\"time\", *mc.graph.nodes))]\n last_time = 0\n eqm_data = []\n param_dict = mc.default_values\n\n for voltage, time_to in protocol:\n param_dict['V'] = voltage\n\n data.append(mc.sample_trajectories(no_trajectories, (last_time, time_to),\n starting_distribution=dist, param_dict=param_dict))\n dist = data[-1].values[-1, 1:]\n _, A = mc.eval_transition_matrix(param_dict)\n # compute steady states\n labels, ss = mc.get_equilibrium_distribution(param_dict=param_dict)\n ss = ss * no_trajectories\n eqm_data = eqm_data + [[last_time, *ss]] + [[time_to, *ss]]\n last_time = time_to\n\n eqm_data = pd.DataFrame(eqm_data, columns=['time'] + [lb + ' eqm distribution' for lb in labels]).set_index(\"time\")\n\n data = pd.concat(data).set_index(\"time\").sort_index()\n\n data.plot(ax=ax1)\n eqm_data.plot(style=\"--\", ax=ax1)\n\n ax2 = fig.add_subplot(212)\n\n # Need each voltage twice - at the beginning and end of each step\n voltages = [[v, v] for v, _ in protocol]\n voltages = [v for voltage in voltages for v in voltage]\n times = [0]\n for _, time_to in protocol:\n times = times + [time_to] * 2\n times = times[0:-1]\n ax2.plot(times, voltages)\n fpath = os.path.join(output_dir, \"SimulateStepProtocol_{}.pdf\".format(name))\n plt.savefig(fpath)\n logging.info(\"wrote to %s\" % fpath)\n\n\nif __name__ == \"__main__\":\n global output_dir\n output_dir = os.environ.get('MARKOVBUILDER_EXAMPLE_OUTPUT', os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"example_output\"))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n output_dir = output_dir\n logging.basicConfig(stream=sys.stderr, level=logging.INFO)\n logging.info(\"outputting to \" + output_dir)\n main()\n" ]
[ [ "pandas.concat", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
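The plotting loop at the end of `SimulateStepProtocol` above builds a piecewise-constant voltage trace by repeating each step's voltage at both of its endpoints, so the plotted line stays flat within a step and jumps at the boundary. The same idea as a standalone sketch (using the script's own two-step protocol):

```python
# Each step is (voltage_mV, time_to_ms).
protocol = ((-80, 100), (20, 200))

voltages, times = [], [0]
for v, time_to in protocol:
    voltages += [v, v]           # voltage at the start and the end of the step
    times += [time_to, time_to]  # each boundary appears twice: end of one step, start of the next
times = times[:-1]               # drop the extra copy after the final step

print(times)     # [0, 100, 100, 200]
print(voltages)  # [-80, -80, 20, 20]
```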
hummat/if-net
[ "6eb6b3860159ba0a46167844020d8cbc7717fbb4" ]
[ "models/local_model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# 1D convolution is used for the decoder. It acts as a standard FC, but allows to use a batch of point samples features,\n# additionally to the batch over the input objects.\n# The dimensions are used as follows:\n# batch_size (N) = #3D objects , channels = features, signal_lengt (L) (convolution dimension) = #point samples\n# kernel_size = 1 i.e. every convolution is done over only all features of one point sample, this makes it a FC.\n\n\n# ShapeNet Voxel Super-Resolution --------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------\nclass ShapeNet32Vox(nn.Module):\n\n def __init__(self, hidden_dim=256):\n super(ShapeNet32Vox, self).__init__()\n\n self.conv_1 = nn.Conv3d(1, 32, 3, padding=1) # out: 32\n self.conv_1_1 = nn.Conv3d(32, 64, 3, padding=1) # out: 32\n self.conv_2 = nn.Conv3d(64, 128, 3, padding=1) # out: 16\n self.conv_2_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 16\n self.conv_3 = nn.Conv3d(128, 128, 3, padding=1) # out: 8\n self.conv_3_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 8\n\n feature_size = (1 + 64 + 128 + 128) * 7\n self.fc_0 = nn.Conv1d(feature_size, hidden_dim * 2, 1)\n self.fc_1 = nn.Conv1d(hidden_dim * 2, hidden_dim, 1)\n self.fc_2 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_out = nn.Conv1d(hidden_dim, 1, 1)\n self.actvn = nn.ReLU()\n\n self.maxpool = nn.MaxPool3d(2)\n\n self.conv1_1_bn = nn.BatchNorm3d(64)\n self.conv2_1_bn = nn.BatchNorm3d(128)\n self.conv3_1_bn = nn.BatchNorm3d(128)\n\n displacement = 0.035\n displacements = [[0, 0, 0]]\n for x in range(3):\n for y in [-1, 1]:\n input = [0, 0, 0]\n input[x] = y * displacement\n displacements.append(input)\n\n self.displacements = torch.Tensor(displacements).cuda()\n\n def forward(self, p, x):\n x = x.unsqueeze(1)\n\n p_features = p.transpose(1, -1)\n p = p.unsqueeze(1).unsqueeze(1)\n p = torch.cat([p + d for d in self.displacements], dim=2) # (B,1,7,num_samples,3)\n feature_0 = F.grid_sample(x, p) # out : (B,C (of x), 1,1,sample_num)\n\n net = self.actvn(self.conv_1(x))\n net = self.actvn(self.conv_1_1(net))\n net = self.conv1_1_bn(net)\n feature_1 = F.grid_sample(net, p) # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_2(net))\n net = self.actvn(self.conv_2_1(net))\n net = self.conv2_1_bn(net)\n feature_2 = F.grid_sample(net, p)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_3(net))\n net = self.actvn(self.conv_3_1(net))\n net = self.conv3_1_bn(net)\n feature_3 = F.grid_sample(net, p)\n\n # here every channel corresponse to one feature.\n\n features = torch.cat((feature_0, feature_1, feature_2, feature_3),\n dim=1) # (B, features, 1,7,sample_num)\n shape = features.shape\n features = torch.reshape(features,\n (shape[0], shape[1] * shape[3], shape[4])) # (B, featues_per_sample, samples_num)\n # features = torch.cat((features, p_features), dim=1) # (B, featue_size, samples_num)\n\n net = self.actvn(self.fc_0(features))\n net = self.actvn(self.fc_1(net))\n net = self.actvn(self.fc_2(net))\n net = self.fc_out(net)\n out = net.squeeze(1)\n\n return out\n\n\nclass ShapeNet128Vox(nn.Module):\n\n def __init__(self, hidden_dim=256):\n super(ShapeNet128Vox, self).__init__()\n # accepts 128**3 res input\n self.conv_in = nn.Conv3d(1, 16, 3, padding=1) # out: 128\n self.conv_0 = nn.Conv3d(16, 32, 3, padding=1) # out: 64\n self.conv_0_1 = nn.Conv3d(32, 32, 3, padding=1) # 
out: 64\n self.conv_1 = nn.Conv3d(32, 64, 3, padding=1) # out: 32\n self.conv_1_1 = nn.Conv3d(64, 64, 3, padding=1) # out: 32\n self.conv_2 = nn.Conv3d(64, 128, 3, padding=1) # out: 16\n self.conv_2_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 16\n self.conv_3 = nn.Conv3d(128, 128, 3, padding=1) # out: 8\n self.conv_3_1 = nn.Conv3d(128, 128, 3, padding=1) # out: 8\n\n feature_size = (1 + 16 + 32 + 64 + 128 + 128) * 7\n self.fc_0 = nn.Conv1d(feature_size, hidden_dim, 1)\n self.fc_1 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_2 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_out = nn.Conv1d(hidden_dim, 1, 1)\n self.actvn = nn.ReLU()\n\n self.maxpool = nn.MaxPool3d(2)\n\n self.conv_in_bn = nn.BatchNorm3d(16)\n self.conv0_1_bn = nn.BatchNorm3d(32)\n self.conv1_1_bn = nn.BatchNorm3d(64)\n self.conv2_1_bn = nn.BatchNorm3d(128)\n self.conv3_1_bn = nn.BatchNorm3d(128)\n\n displacment = 0.0722\n displacments = []\n displacments.append([0, 0, 0])\n for x in range(3):\n for y in [-1, 1]:\n input = [0, 0, 0]\n input[x] = y * displacment\n displacments.append(input)\n\n self.displacments = torch.Tensor(displacments).cuda()\n\n def forward(self, p, x):\n x = x.unsqueeze(1)\n\n p_features = p.transpose(1, -1)\n p = p.unsqueeze(1).unsqueeze(1)\n p = torch.cat([p + d for d in self.displacments], dim=2) # (B,1,7,num_samples,3)\n feature_0 = F.grid_sample(x, p) # out : (B,C (of x), 1,1,sample_num)\n\n net = self.actvn(self.conv_in(x))\n net = self.conv_in_bn(net)\n feature_1 = F.grid_sample(net, p) # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_0(net))\n net = self.actvn(self.conv_0_1(net))\n net = self.conv0_1_bn(net)\n feature_2 = F.grid_sample(net, p) # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_1(net))\n net = self.actvn(self.conv_1_1(net))\n net = self.conv1_1_bn(net)\n feature_3 = F.grid_sample(net, p) # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_2(net))\n net = self.actvn(self.conv_2_1(net))\n net = self.conv2_1_bn(net)\n feature_4 = F.grid_sample(net, p)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_3(net))\n net = self.actvn(self.conv_3_1(net))\n net = self.conv3_1_bn(net)\n feature_5 = F.grid_sample(net, p)\n\n # here every channel corresponse to one feature.\n\n features = torch.cat((feature_0, feature_1, feature_2, feature_3, feature_4, feature_5),\n dim=1) # (B, features, 1,7,sample_num)\n shape = features.shape\n features = torch.reshape(features,\n (shape[0], shape[1] * shape[3], shape[4])) # (B, featues_per_sample, samples_num)\n # features = torch.cat((features, p_features), dim=1) # (B, featue_size, samples_num)\n\n net = self.actvn(self.fc_0(features))\n net = self.actvn(self.fc_1(net))\n net = self.actvn(self.fc_2(net))\n net = self.fc_out(net)\n out = net.squeeze(1)\n\n return out\n\n\n# ShapeNet Pointcloud Completion ---------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------\n\nclass ShapeNetPoints(nn.Module):\n\n def __init__(self, hidden_dim: int = 256, displacements: bool = True):\n super(ShapeNetPoints, self).__init__()\n # 128**3 res input\n self.conv_in = nn.Conv3d(1, 16, 3, padding=1, padding_mode='border')\n self.conv_0 = nn.Conv3d(16, 32, 3, padding=1, padding_mode='border')\n self.conv_0_1 = nn.Conv3d(32, 32, 3, padding=1, padding_mode='border')\n self.conv_1 = nn.Conv3d(32, 64, 3, padding=1, 
padding_mode='border')\n self.conv_1_1 = nn.Conv3d(64, 64, 3, padding=1, padding_mode='border')\n self.conv_2 = nn.Conv3d(64, 128, 3, padding=1, padding_mode='border')\n self.conv_2_1 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border')\n self.conv_3 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border')\n self.conv_3_1 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border')\n\n # one feature vector per sampled point: 7 displaced copies if enabled, else just the point itself\n feature_size = (1 + 16 + 32 + 64 + 128 + 128) * (7 if displacements else 1)\n self.fc_0 = nn.Conv1d(feature_size, hidden_dim, 1)\n self.fc_1 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_2 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_out = nn.Conv1d(hidden_dim, 1, 1)\n self.actvn = nn.ReLU()\n\n self.maxpool = nn.MaxPool3d(2)\n\n self.conv_in_bn = nn.BatchNorm3d(16)\n self.conv0_1_bn = nn.BatchNorm3d(32)\n self.conv1_1_bn = nn.BatchNorm3d(64)\n self.conv2_1_bn = nn.BatchNorm3d(128)\n self.conv3_1_bn = nn.BatchNorm3d(128)\n\n if displacements:\n displacement = 0.0722\n displacements = []\n displacements.append([0, 0, 0])\n for x in range(3):\n for y in [-1, 1]:\n input = [0, 0, 0]\n input[x] = y * displacement\n displacements.append(input)\n\n self.displacements = torch.Tensor(displacements).cuda()\n else:\n # fall back to sampling at the undisplaced query points only,\n # so the torch.cat over displacements in forward() still works\n self.displacements = torch.Tensor([[0, 0, 0]]).cuda()\n\n def forward(self, p, x):\n x = x.unsqueeze(1)\n\n # p_features = p.transpose(1, -1)\n p = p.unsqueeze(1).unsqueeze(1)\n p = torch.cat([p + d for d in self.displacements], dim=2) # (B,1,7,num_samples,3)\n feature_0 = F.grid_sample(x, p, padding_mode='border') # out : (B,C (of x), 1,1,sample_num)\n\n net = self.actvn(self.conv_in(x))\n net = self.conv_in_bn(net)\n feature_1 = F.grid_sample(net, p, padding_mode='border') # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_0(net))\n net = self.actvn(self.conv_0_1(net))\n net = self.conv0_1_bn(net)\n feature_2 = F.grid_sample(net, p, padding_mode='border') # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_1(net))\n net = self.actvn(self.conv_1_1(net))\n net = self.conv1_1_bn(net)\n feature_3 = F.grid_sample(net, p, padding_mode='border') # out : (B,C (of x), 1,1,sample_num)\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_2(net))\n net = self.actvn(self.conv_2_1(net))\n net = self.conv2_1_bn(net)\n feature_4 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_3(net))\n net = self.actvn(self.conv_3_1(net))\n net = self.conv3_1_bn(net)\n feature_5 = F.grid_sample(net, p, padding_mode='border')\n\n # here every channel corresponds to one feature. (B, features, 1,7,sample_num)\n features = torch.cat((feature_0, feature_1, feature_2, feature_3, feature_4, feature_5), dim=1)\n shape = features.shape\n # (B, features_per_sample, samples_num)\n features = torch.reshape(features, (shape[0], shape[1] * shape[3], shape[4]))\n # features = torch.cat((features, p_features), dim=1) # (B, feature_size, samples_num)\n\n net = self.actvn(self.fc_0(features))\n net = self.actvn(self.fc_1(net))\n net = self.actvn(self.fc_2(net))\n net = self.fc_out(net)\n out = net.squeeze(1)\n\n return out\n\n\n# 3D Single View Reconstruction (for 256**3 input voxelization) --------------------------------------\n# ----------------------------------------------------------------------------------------------------\n\nclass SVR(nn.Module):\n\n def __init__(self, hidden_dim=256):\n super(SVR, self).__init__()\n\n self.conv_in = nn.Conv3d(1, 16, 3, padding=1, padding_mode='border') # out: 256 ->m.p. 
128\n self.conv_0 = nn.Conv3d(16, 32, 3, padding=1, padding_mode='border') # out: 128\n self.conv_0_1 = nn.Conv3d(32, 32, 3, padding=1, padding_mode='border') # out: 128 ->m.p. 64\n self.conv_1 = nn.Conv3d(32, 64, 3, padding=1, padding_mode='border') # out: 64\n self.conv_1_1 = nn.Conv3d(64, 64, 3, padding=1, padding_mode='border') # out: 64 -> mp 32\n self.conv_2 = nn.Conv3d(64, 128, 3, padding=1, padding_mode='border') # out: 32\n self.conv_2_1 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border') # out: 32 -> mp 16\n self.conv_3 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border') # out: 16\n self.conv_3_1 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border') # out: 16 -> mp 8\n self.conv_4 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border') # out: 8\n self.conv_4_1 = nn.Conv3d(128, 128, 3, padding=1, padding_mode='border') # out: 8\n\n feature_size = (1 + 16 + 32 + 64 + 128 + 128 + 128) * 7 + 3\n self.fc_0 = nn.Conv1d(feature_size, hidden_dim * 2, 1)\n self.fc_1 = nn.Conv1d(hidden_dim * 2, hidden_dim, 1)\n self.fc_2 = nn.Conv1d(hidden_dim, hidden_dim, 1)\n self.fc_out = nn.Conv1d(hidden_dim, 1, 1)\n self.actvn = nn.ReLU()\n\n self.maxpool = nn.MaxPool3d(2)\n\n self.conv_in_bn = nn.BatchNorm3d(16)\n self.conv0_1_bn = nn.BatchNorm3d(32)\n self.conv1_1_bn = nn.BatchNorm3d(64)\n self.conv2_1_bn = nn.BatchNorm3d(128)\n self.conv3_1_bn = nn.BatchNorm3d(128)\n self.conv4_1_bn = nn.BatchNorm3d(128)\n\n displacment = 0.0722\n displacments = []\n displacments.append([0, 0, 0])\n for x in range(3):\n for y in [-1, 1]:\n input = [0, 0, 0]\n input[x] = y * displacment\n displacments.append(input)\n\n self.displacments = torch.Tensor(displacments).cuda()\n\n def forward(self, p, x):\n x = x.unsqueeze(1)\n\n p_features = p.transpose(1, -1)\n p = p.unsqueeze(1).unsqueeze(1)\n p = torch.cat([p + d for d in self.displacments], dim=2)\n feature_0 = F.grid_sample(x, p, padding_mode='border')\n\n net = self.actvn(self.conv_in(x))\n net = self.conv_in_bn(net)\n feature_1 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net) # out 128\n\n net = self.actvn(self.conv_0(net))\n net = self.actvn(self.conv_0_1(net))\n net = self.conv0_1_bn(net)\n feature_2 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net) # out 64\n\n net = self.actvn(self.conv_1(net))\n net = self.actvn(self.conv_1_1(net))\n net = self.conv1_1_bn(net)\n feature_3 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_2(net))\n net = self.actvn(self.conv_2_1(net))\n net = self.conv2_1_bn(net)\n feature_4 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_3(net))\n net = self.actvn(self.conv_3_1(net))\n net = self.conv3_1_bn(net)\n feature_5 = F.grid_sample(net, p, padding_mode='border')\n net = self.maxpool(net)\n\n net = self.actvn(self.conv_4(net))\n net = self.actvn(self.conv_4_1(net))\n net = self.conv4_1_bn(net)\n feature_6 = F.grid_sample(net, p, padding_mode='border')\n\n # here every channel corresponse to one feature.\n\n features = torch.cat((feature_0, feature_1, feature_2, feature_3, feature_4, feature_5, feature_6),\n dim=1) # (B, features, 1,7,sample_num)\n shape = features.shape\n features = torch.reshape(features,\n (shape[0], shape[1] * shape[3], shape[4])) # (B, featues_per_sample, samples_num)\n features = torch.cat((features, p_features), dim=1) # (B, featue_size, samples_num)\n\n net = self.actvn(self.fc_0(features))\n net = self.actvn(self.fc_1(net))\n net = 
self.actvn(self.fc_2(net))\n net = self.fc_out(net)\n out = net.squeeze(1)\n\n return out\n" ]
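The `feature_size` constants above encode the multi-scale sampling scheme used here: every query point is evaluated at seven grid locations (the point itself plus a displacement of 0.0722 in each direction along each axis), at the raw input grid and at each convolutional stage. A minimal sketch (not part of the repository) that checks the arithmetic; the trailing `+ 3` in `SVR` is the raw xyz coordinates concatenated as `p_features`:

```python
# Sketch: per-query-point feature widths implied by the decoders above.
samples = 1 + 3 * 2  # centre sample + one +/- displacement per axis = 7

# ShapeNetPoints: input grid (1 channel) + five sampled conv stages
assert (1 + 16 + 32 + 64 + 128 + 128) * samples == 2583

# SVR: one extra conv stage, plus the 3 raw point coordinates (p_features)
assert (1 + 16 + 32 + 64 + 128 + 128 + 128) * samples + 3 == 3482
```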
[ [ "torch.Tensor", "torch.cat", "torch.reshape", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.functional.grid_sample", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
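For reference, a minimal, hypothetical smoke test for the decoders above. The input shapes are assumptions inferred from the "128**3 res input" comment, and a CUDA device is required because the displacement tensor is created with `.cuda()`:

```python
import torch

# Hypothetical usage sketch; ShapeNetPoints is the class defined above.
model = ShapeNetPoints(hidden_dim=256).cuda().eval()
x = torch.rand(2, 128, 128, 128).cuda()    # batch of 128^3 voxel grids
p = torch.rand(2, 5000, 3).cuda() * 2 - 1  # query points in [-1, 1]^3
with torch.no_grad():
    out = model(p, x)                      # (2, 5000) per-point occupancy logits
```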
lazy-turtle/SharpMask-RCNN
[ "f6a3106b029b8147cecec429ec9a3d747449274f" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\nimport keras.backend as KB\nfrom keras.utils import conv_utils\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.2+ and Keras 2.0.6+ (enforced by the asserts below).\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.2\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.6')\n\n\n############################################################\n#  Utility Functions\n############################################################\n\ndef log(text, array=None):\n    \"\"\"Prints a text message and, optionally, if a Numpy array is provided,\n    its shape, min, and max values.\n    \"\"\"\n    if array is not None:\n        text = text.ljust(25)\n        text += (\"shape: {:20}  min: {:10.5f}  max: {:10.5f}  {}\".format(\n            str(array.shape),\n            array.min() if array.size else \"\",\n            array.max() if array.size else \"\",\n            array.dtype))\n    print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n    \"\"\"Extends the Keras BatchNormalization class to allow a central place\n    to make changes if needed.\n\n    Batch normalization has a negative effect on training if batches are small,\n    so this layer is often frozen (via a setting in the Config class) and\n    functions as a linear layer.\n    \"\"\"\n    def call(self, inputs, training=None):\n        \"\"\"\n        Note about training values:\n            None: Train BN layers. This is the normal mode\n            False: Freeze BN layers. Good when batch size is small\n            True: (don't use). Set layer in training mode even when making inferences\n        \"\"\"\n        return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n    \"\"\"Computes the width and height of each stage of the backbone network.\n\n    Returns:\n        [N, (height, width)]. Where N is the number of stages\n    \"\"\"\n    if callable(config.BACKBONE):\n        return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n    # Currently supports ResNet only\n    assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n    return np.array(\n        [[int(math.ceil(image_shape[0] / stride)),\n            int(math.ceil(image_shape[1] / stride))]\n            for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n#  Resnet Graph\n############################################################\n\n# Code adapted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n                   use_bias=True, train_bn=True):\n    \"\"\"The identity_block is the block that has no conv layer at shortcut\n    # Arguments\n        input_tensor: input tensor\n        kernel_size: default 3, the kernel size of middle conv layer at main path\n        filters: list of integers, the nb_filters of 3 conv layer at main path\n        stage: integer, current stage label, used for generating layer names\n        block: 'a','b'..., current block label, used for generating layer names\n        use_bias: Boolean. To use or not use a bias in conv layers.\n        train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n    - feature_maps: List of feature maps from different levels of the pyramid.\n                    Each is [batch, height, width, channels]\n\n    Output:\n    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n    The width and height are those specified in the pool_shape in the layer\n    constructor.\n    \"\"\"\n\n    def __init__(self, pool_shape, **kwargs):\n        super(PyramidROIAlign, self).__init__(**kwargs)\n        self.pool_shape = tuple(pool_shape)\n\n    def call(self, inputs):\n        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n        boxes = inputs[0]\n\n        # Image meta\n        # Holds details about the image. See compose_image_meta()\n        image_meta = inputs[1]\n\n        # Feature Maps. List of feature maps from different levels of the\n        # feature pyramid. Each is [batch, height, width, channels]\n        feature_maps = inputs[2:]\n\n        # Assign each ROI to a level in the pyramid based on the ROI area.\n        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n        h = y2 - y1\n        w = x2 - x1\n        # Use shape of first image. Images in a batch must have the same size.\n        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n        # Equation 1 in the Feature Pyramid Networks paper. Account for\n        # the fact that our coordinates are normalized here.\n        # e.g. a 224x224 ROI (in pixels) maps to P4\n        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n        roi_level = tf.minimum(5, tf.maximum(\n            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n        roi_level = tf.squeeze(roi_level, 2)\n\n        # Loop through levels and apply ROI pooling to each. P2 to P5.\n        pooled = []\n        box_to_level = []\n        for i, level in enumerate(range(2, 6)):\n            ix = tf.where(tf.equal(roi_level, level))\n            level_boxes = tf.gather_nd(boxes, ix)\n\n            # Box indices for crop_and_resize.\n            box_indices = tf.cast(ix[:, 0], tf.int32)\n\n            # Keep track of which box is mapped to which level\n            box_to_level.append(ix)\n\n            # Stop gradient propagation to ROI proposals\n            level_boxes = tf.stop_gradient(level_boxes)\n            box_indices = tf.stop_gradient(box_indices)\n\n            # Crop and Resize\n            # From Mask R-CNN paper: \"We sample four regular locations, so\n            # that we can evaluate either max or average pooling. 
In fact,\n            # interpolating only a single value at each bin center (without\n            # pooling) is nearly as effective.\"\n            #\n            # Here we use the simplified approach of a single value per bin,\n            # which is how it's done in tf.crop_and_resize()\n            # Result: [batch * num_boxes, pool_height, pool_width, channels]\n            pooled.append(tf.image.crop_and_resize(\n                feature_maps[i], level_boxes, box_indices, self.pool_shape,\n                method=\"bilinear\"))\n\n        # Pack pooled features into one tensor\n        pooled = tf.concat(pooled, axis=0)\n\n        # Pack box_to_level mapping into one array and add another\n        # column representing the order of pooled boxes\n        box_to_level = tf.concat(box_to_level, axis=0)\n        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n                                 axis=1)\n\n        # Rearrange pooled features to match the order of the original boxes\n        # Sort box_to_level by batch then box index\n        # TF doesn't have a way to sort by two columns, so merge them and sort.\n        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n            box_to_level)[0]).indices[::-1]\n        ix = tf.gather(box_to_level[:, 2], ix)\n        pooled = tf.gather(pooled, ix)\n\n        # Re-add the batch dimension\n        shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n        pooled = tf.reshape(pooled, shape)\n        return pooled\n\n    def compute_output_shape(self, input_shape):\n        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\nclass MaskROIAlign(KE.Layer):\n    \"\"\"Implements ROI Pooling on the last level of the FPN only; the other\n    levels are used later on.\n\n    Params:\n    - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n    Inputs:\n    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n             coordinates. Possibly padded with zeros if not enough\n             boxes to fill the array.\n    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n    - feature_maps: List of feature maps from different levels of the pyramid.\n                    Each is [batch, height, width, channels]\n\n    Output:\n    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n    The width and height are those specified in the pool_shape in the layer\n    constructor.\n    \"\"\"\n\n    def __init__(self, pool_shape, **kwargs):\n        super(MaskROIAlign, self).__init__(**kwargs)\n        self.pool_shape = (pool_shape, pool_shape)\n\n    def call(self, inputs):\n        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n        boxes = inputs[0]\n\n        # feature pyramid map [batch, height, width, channels]\n        feature_map = inputs[1]\n        fshape = tf.shape(feature_map)\n\n        # iterate over batches to crop and resize\n        boxes_shape = tf.shape(boxes)  # [batch, num boxes, 4]\n        boxes_flats = tf.reshape(boxes, [-1, 4])  # flatten to [batch * num_boxes, 4]\n\n        indices = tf.range(0, boxes_shape[0])  # enumerate batches\n        indices = tf.expand_dims(indices, -1)  # [batches, 1]\n        box_ind = tf.tile(indices, [1, boxes_shape[1]])  # repeat each batch index 'num_boxes' times\n        box_ind = tf.reshape(box_ind, [-1])  # get a flat list of batch indices for each box, e.g. 
[000111222]\n\n boxes_flats = tf.stop_gradient(boxes_flats)\n box_ind = tf.stop_gradient(box_ind)\n\n crops = tf.image.crop_and_resize(feature_map, boxes_flats, box_ind, self.pool_shape, method='bilinear')\n crops = tf.stop_gradient(crops) #just to be sure, you know\n\n # reshape from [batch * n.boxes, pool_size, pool_size, 256] to [batch, n.box, pool, pool 256]\n return tf.reshape(crops, [-1, boxes_shape[1],self.pool_shape[0], self.pool_shape[1], fshape[3]])\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. 
Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n                 Masks cropped to bbox boundaries and resized to neural\n                 network output size.\n\n    Note: Returned arrays might be zero padded if not enough target ROIs.\n    \"\"\"\n\n    def __init__(self, config, **kwargs):\n        super(DetectionTargetLayer, self).__init__(**kwargs)\n        self.config = config\n\n    def call(self, inputs):\n        proposals = inputs[0]\n        gt_class_ids = inputs[1]\n        gt_boxes = inputs[2]\n        gt_masks = inputs[3]\n\n        # Slice the batch and run a graph for each slice\n        # TODO: Rename target_bbox to target_deltas for clarity\n        names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n        outputs = utils.batch_slice(\n            [proposals, gt_class_ids, gt_boxes, gt_masks],\n            lambda w, x, y, z: detection_targets_graph(\n                w, x, y, z, self.config),\n            self.config.IMAGES_PER_GPU, names=names)\n        return outputs\n\n    def compute_output_shape(self, input_shape):\n        return [\n            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois\n            (None, self.config.TRAIN_ROIS_PER_IMAGE),  # class_ids\n            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas\n            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n             self.config.MASK_SHAPE[1])  # masks\n        ]\n\n    def compute_mask(self, inputs, mask=None):\n        return [None, None, None, None]\n\n\n############################################################\n#  Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n    \"\"\"Refine classified proposals, filter overlaps, and return final\n    detections.\n\n    Inputs:\n        rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n        probs: [N, num_classes]. Class probabilities.\n        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n                bounding box deltas.\n        window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n            that contains the actual image, excluding the padding.\n\n    Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n        coordinates are normalized.\n    \"\"\"\n    # Class IDs per ROI\n    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n    # Class probability of the top class of each ROI\n    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n    class_scores = tf.gather_nd(probs, indices)\n    # Class-specific bounding box deltas\n    deltas_specific = tf.gather_nd(deltas, indices)\n    # Apply bounding box deltas\n    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n    refined_rois = apply_box_deltas_graph(\n        rois, deltas_specific * config.BBOX_STD_DEV)\n    # Clip boxes to image window\n    refined_rois = clip_boxes_graph(refined_rois, window)\n\n    # TODO: Filter out boxes with zero area\n\n    # Filter out background boxes\n    keep = tf.where(class_ids > 0)[:, 0]\n    # Filter out low confidence boxes\n    if config.DETECTION_MIN_CONFIDENCE:\n        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n                                        tf.expand_dims(conf_keep, 0))\n        keep = tf.sparse_tensor_to_dense(keep)[0]\n\n    # Apply per-class NMS\n    # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n        # in the image that excludes the padding.\n        # Use the shape of the first image in the batch to normalize the window\n        # because we know that all images get resized to the same size.\n        m = parse_image_meta_graph(image_meta)\n        image_shape = m['image_shape'][0]\n        window = norm_boxes_graph(m['window'], image_shape[:2])\n\n        # Run detection refinement graph on each item in the batch\n        detections_batch = utils.batch_slice(\n            [rois, mrcnn_class, mrcnn_bbox, window],\n            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n            self.config.IMAGES_PER_GPU)\n\n        # Reshape output\n        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n        # normalized coordinates\n        return tf.reshape(\n            detections_batch,\n            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n    def compute_output_shape(self, input_shape):\n        return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\nclass BilinearUpSampling2D(KL.Layer):\n    \"\"\"\n    Keras did not have a bilinear upsampling layer until version 2.2.\n    This layer provides a simple way to upscale with bilinear interpolation using TensorFlow ops.\n    \"\"\"\n    def __init__(self, size=(2, 2), **kwargs):\n        super(BilinearUpSampling2D, self).__init__(**kwargs)\n        self.size = size\n\n    def call(self, inputs):\n        shape = KB.shape(inputs)\n        dims = [shape[1] * self.size[0], shape[2] * self.size[1]]\n        return tf.image.resize_bilinear(inputs, dims, align_corners=True)\n\n    def compute_output_shape(self, input_shape):\n        size_all_dims = (1,) + self.size + (1,)\n\n        output_shape = list(input_shape)\n        for dim in range(len(output_shape)):\n            if output_shape[dim] is not None:\n                output_shape[dim] *= size_all_dims[dim]\n\n        return tuple(output_shape)\n\n\n############################################################\n#  Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n    \"\"\"Builds the computation graph of Region Proposal Network.\n\n    feature_map: backbone features [batch, height, width, depth]\n    anchors_per_location: number of anchors per pixel in the feature map\n    anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n                   every pixel in the feature map), or 2 (every other pixel).\n\n    Returns:\n        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n                  applied to anchors.\n    \"\"\"\n    # TODO: check if stride of 2 causes alignment issues if the feature map\n    # is not even.\n    # Shared convolutional base of the RPN\n    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n                       strides=anchor_stride,\n                       name='rpn_conv_shared')(feature_map)\n\n    # Anchor Score. [batch, height, width, anchors per location * 2].\n    x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n                  activation='linear', name='rpn_class_raw')(shared)\n\n    # Reshape to [batch, anchors, 2]\n    rpn_class_logits = KL.Lambda(\n        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n    # Softmax on last dimension of BG/FG.\n    rpn_probs = KL.Activation(\n        \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n    # Bounding box refinement. 
[batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n    fc_layers_size: Size of the 2 FC layers\n\n    Returns:\n        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n                     proposal boxes\n    \"\"\"\n    # ROI Pooling\n    # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n    x = PyramidROIAlign([pool_size, pool_size],\n                        name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n    # Two 1024 FC layers (implemented with Conv2D for consistency)\n    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n                           name=\"mrcnn_class_conv1\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n                           name=\"mrcnn_class_conv2\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n                       name=\"pool_squeeze\")(x)\n\n    # Classifier head\n    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n                                            name='mrcnn_class_logits')(shared)\n    mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n                                     name=\"mrcnn_class\")(mrcnn_class_logits)\n\n    # BBox head\n    # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n                           name='mrcnn_bbox_fc')(shared)\n    # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n    s = K.int_shape(x)\n    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n# Add-merge refinement (cf. the concat variant commented out below)\ndef refinement_module_a(x, rois, fpn_map, pool_size, channels, stage, train_bn):\n    f = MaskROIAlign(pool_size, name=\"sharp_mask_ref_roi{}\".format(stage))([rois, fpn_map])\n    f = KL.TimeDistributed(KL.Conv2D(channels, (1, 1), padding=\"same\", name=\"sharp_mask_ref_c{}f\".format(stage)),\n                           name=\"sharp_mask_ref_td{}a\".format(stage))(f)\n    f = KL.TimeDistributed(BatchNorm(name=\"sharp_mask_ref_bn{}f\".format(stage)),\n                           name='sharp_mask_ref_bn{}f'.format(stage))(f, training=train_bn)\n\n    out = KL.Add(name=\"sharp_mask_ref_add{}\".format(stage))([x, f])\n    out = KL.TimeDistributed(KL.Conv2D(channels, (3, 3), padding=\"same\", name=\"sharp_mask_ref_c{}out\".format(stage)),\n                             name=\"sharp_mask_ref_td{}b\".format(stage))(out)\n    out = KL.TimeDistributed(BatchNorm(name=\"sharp_mask_ref_bn{}out\".format(stage)),\n                             name='sharp_mask_ref_td{}c'.format(stage))(out, training=train_bn)\n    out = KL.Activation('relu', name=\"sharp_mask_ref_relu{}\".format(stage))(out)\n    return out\n\n# #Concat\n# def refinement_module_b(x, rois, fpn_map, pool_size, channels, stage):\n#     f = MaskROIAlign(pool_size, name=\"sharp_mask_ref_roi{}\".format(stage))([rois, fpn_map])\n#     f = KL.TimeDistributed(\n#         KL.Conv2D(int(channels//2), (3, 3), padding=\"same\", name=\"sharp_mask_ref_c{}f\".format(stage)),\n#         name=\"sharp_mask_ref_td{}a\".format(stage))(f)\n#     f = KL.TimeDistributed(BatchNorm(),name='sharp_mask_ref_bn{}f'.format(stage))(f, training=True)\n#\n#     m = KL.TimeDistributed(\n#         KL.Conv2D(int(channels//2), (3, 3), padding=\"same\", name=\"sharp_mask_ref_c{}m\".format(stage)),\n#         name=\"sharp_mask_ref_td{}b\".format(stage))(x)\n#     m = KL.TimeDistributed(BatchNorm(),name='sharp_mask_ref_bn{}m'.format(stage))(m, training=True)\n#\n#     out = 
KL.Concatenate(name=\"sharp_mask_ref_merge{}\".format(stage))([m, f])\n#     out = KL.Activation('relu', name=\"sharp_mask_ref_relu{}\".format(stage))(out)\n#     return out\n\ndef build_fpn_mask_graph(rois, feature_maps, pool_sizes, num_classes, train_bn=True):\n    \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n          coordinates.\n    feature_maps: List of feature maps from different layers of the pyramid,\n                  [P2, P3, P4, P5]. Each has a different resolution.\n    pool_sizes: The widths of the square feature maps generated from ROI Pooling,\n                one per refinement stage.\n    num_classes: number of classes, which determines the depth of the results\n    train_bn: Boolean. Train or freeze Batch Norm layers\n\n    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n    \"\"\"\n    # ROI Pooling\n    # Start from P5 (32 x 32) and sample a region 14x14\n    # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n    # 4 iterations over 14 x 14 x 256 rois\n    x = MaskROIAlign(pool_sizes[0], name=\"sharp_mask_roi_align1\")([rois, feature_maps[3]])\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"), name=\"sharp_mask_conv1\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='sharp_mask_bn1')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(BilinearUpSampling2D(), name=\"sharp_mask_up1\")(x)\n    x = refinement_module_a(x, rois, feature_maps[2], pool_sizes[1], 256, 2, train_bn=train_bn)\n\n    x = KL.TimeDistributed(BilinearUpSampling2D(), name=\"sharp_mask_up2\")(x)\n    x = refinement_module_a(x, rois, feature_maps[1], pool_sizes[2], 256, 3, train_bn=train_bn)\n\n    x = KL.TimeDistributed(BilinearUpSampling2D(), name=\"sharp_mask_up3\")(x)\n    x = refinement_module_a(x, rois, feature_maps[0], pool_sizes[3], 256, 4, train_bn=train_bn)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"), name=\"sharp_mask_conv5a\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='sharp_mask_bn5a')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"), name=\"sharp_mask_conv5b\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='sharp_mask_bn5b')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"), name=\"sharp_mask_conv5c\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='sharp_mask_bn5c')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"), name=\"sharp_mask_conv5d\")(x)\n    x = KL.TimeDistributed(BatchNorm(), name='sharp_mask_bn5d')(x, training=train_bn)\n    x = KL.Activation('relu')(x)\n\n    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"relu\"), name=\"sharp_mask_out\")(x)\n    x = KL.TimeDistributed(KL.Conv2DTranspose(num_classes, (2, 2), strides=2, activation=\"sigmoid\"), name=\"sharp_mask_deconv\")(x)\n\n    return x\n\n\n############################################################\n#  Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n    \"\"\"Implements Smooth-L1 loss.\n    y_true and y_pred are typically: [N, 4], but could be any shape.\n    \"\"\"\n    diff = K.abs(y_true - y_pred)\n    less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n    return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n    \"\"\"RPN anchor classifier 
loss.\n\n    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n               -1=negative, 0=neutral anchor.\n    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n    \"\"\"\n    # Squeeze last dim to simplify\n    rpn_match = tf.squeeze(rpn_match, -1)\n    # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n    # Positive and Negative anchors contribute to the loss,\n    # but neutral anchors (match value = 0) don't.\n    indices = tf.where(K.not_equal(rpn_match, 0))\n    # Pick rows that contribute to the loss and filter out the rest.\n    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n    anchor_class = tf.gather_nd(anchor_class, indices)\n    # Cross entropy loss\n    loss = K.sparse_categorical_crossentropy(target=anchor_class,\n                                             output=rpn_class_logits,\n                                             from_logits=True)\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n    \"\"\"Return the RPN bounding box loss graph.\n\n    config: the model config object.\n    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n        Uses 0 padding to fill in unused bbox deltas.\n    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n               -1=negative, 0=neutral anchor.\n    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n    \"\"\"\n    # Positive anchors contribute to the loss, but negative and\n    # neutral anchors (match value of 0 or -1) don't.\n    rpn_match = K.squeeze(rpn_match, -1)\n    indices = tf.where(K.equal(rpn_match, 1))\n\n    # Pick bbox deltas that contribute to the loss\n    rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n    # Trim target bounding box deltas to the same length as rpn_bbox.\n    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n    target_bbox = batch_pack_graph(target_bbox, batch_counts,\n                                   config.IMAGES_PER_GPU)\n\n    loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n                           active_class_ids):\n    \"\"\"Loss for the classifier head of Mask RCNN.\n\n    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n        padding to fill in the array.\n    pred_class_logits: [batch, num_rois, num_classes]\n    active_class_ids: [batch, num_classes]. Has a value of 1 for\n        classes that are in the dataset of the image, and 0\n        for classes that are not in the dataset.\n    \"\"\"\n    # During model building, Keras calls this function with\n    # target_class_ids of type float32. Unclear why. Cast it\n    # to int to get around it.\n    target_class_ids = tf.cast(target_class_ids, 'int64')\n\n    # Find predictions of classes that are not in the dataset.\n    pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n    # TODO: Update this line to work with batch > 1. Right now it assumes all\n    # images in a batch have the same active_class_ids\n    pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n    # Loss\n    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n        labels=target_class_ids, logits=pred_class_logits)\n\n    # Erase losses of predictions of classes that are not in the active\n    # classes of the image.\n    loss = loss * pred_active\n\n    # Compute loss mean. 
Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. 
An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
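# The loop above encodes each positive anchor's target as
# (dy, dx, log(dh), log(dw)). A minimal NumPy sketch of the inverse (decode)
# step, useful as a sanity check; apply_box_deltas_reference is an
# illustrative name, and unlike the real pipeline the RPN_BBOX_STD_DEV
# normalization is deliberately ignored here:

import numpy as np

def apply_box_deltas_reference(box, delta):
    # box: [y1, x1, y2, x2]; delta: [dy, dx, log(dh), log(dw)]
    h, w = box[2] - box[0], box[3] - box[1]
    center_y, center_x = box[0] + 0.5 * h, box[1] + 0.5 * w
    center_y += delta[0] * h
    center_x += delta[1] * w
    h *= np.exp(delta[2])
    w *= np.exp(delta[3])
    return np.array([center_y - 0.5 * h, center_x - 0.5 * w,
                     center_y + 0.5 * h, center_x + 0.5 * w])

# Round trip: encoding gt=[2, 2, 8, 8] against anchor=[0, 0, 10, 10] gives
# [0, 0, log(0.6), log(0.6)], which decodes back to [2, 2, 8, 8].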
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
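# A hedged sketch of how data_generator() is typically consumed; dataset and
# config are assumed to be Dataset and Config instances supplied by the caller:

gen = data_generator(dataset, config, shuffle=True, batch_size=config.BATCH_SIZE)
inputs, outputs = next(gen)
# inputs = [images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks]
# outputs = [] in regular training; detection targets are built inside the model graph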
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n BilinearUpSampling2D(name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n BilinearUpSampling2D(name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n BilinearUpSampling2D(name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois,\n mrcnn_feature_maps,\n config.MASK_POOL_SIZES,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, 
mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n config.MASK_POOL_SIZES,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
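# A hedged usage sketch for resuming training from the most recent checkpoint
# located by find_last(); the path in the comment is illustrative:

model = MaskRCNN(mode="training", config=config, model_dir="./logs")
weights_path = model.find_last()  # e.g. ./logs/coco20171029T2315/mask_rcnn_coco_0001.h5
model.load_weights(weights_path, by_name=True)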
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keep_dims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keep_dims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
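# set_trainable() below decides trainability with re.fullmatch against layer
# names. A small illustration using the "heads" pattern defined later in
# train(); the layer names here are examples:

import re

heads = r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)"
bool(re.fullmatch(heads, "rpn_class_raw"))   # True  -> head layer, trainable
bool(re.fullmatch(heads, "res2a_branch2a"))  # False -> backbone layer, frozen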
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from where we left off. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done already, so this actually determines\n the epochs to train in total rather than in this particular\n call.\n layers: Allows selecting which layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
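# A worked example of the checkpoint-name parsing in set_log_dir(), using the
# sample Linux path from the comment above:

import re

regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask_rcnn_[\w-]+(\d{4})\.h5"
m = re.match(regex, "/path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5")
m.groups()  # ('2017', '10', '29', '23', '15', '0001') -> date 2017-10-29 23:15, epoch 1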
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n custom_callbacks: Optional. Add custom callbacks to be called\n with the keras fit_generator method. Must be a list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"mask\": r\"(^sharp\\_mask.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=1,\n use_multiprocessing=False,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
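# A hedged usage sketch for detect(); image is assumed to be an RGB numpy
# array, and the model to have been built with mode="inference":

results = model.detect([image], verbose=1)
r = results[0]
# r["rois"]: [N, 4] boxes, r["class_ids"]: [N], r["scores"]: [N], r["masks"]: [H, W, N]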
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
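# A hedged sketch of inspecting an intermediate tensor with run_graph(); the
# "ROI" layer name is the one given to the ProposalLayer in build() above, and
# image is assumed to be an unmolded input image:

outputs = model.run_graph([image], [
    ("proposals", model.keras_model.get_layer("ROI").output),
])
proposals = outputs["proposals"]  # [batch, N, (y1, x1, y2, x2)], normalized coordinates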
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold_image() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name=None):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
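# The meta vector built above implies a fixed length of
# 1 + 3 + 3 + 4 + 1 + NUM_CLASSES, which is what config.IMAGE_META_SIZE must
# equal. A small worked check (NUM_CLASSES=81 is the COCO value and serves
# only as an example):

NUM_CLASSES = 81                               # COCO: 80 classes + background
meta_size = 1 + 3 + 3 + 4 + 1 + NUM_CLASSES    # = 93
# parse_image_meta() slices the same layout: [0], [1:4], [4:7], [7:11], [11], [12:]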
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.cast", "tensorflow.image.crop_and_resize", "tensorflow.image.non_max_suppression", "tensorflow.equal", "numpy.concatenate", "tensorflow.abs", "tensorflow.map_fn", "numpy.any", "tensorflow.pad", "tensorflow.where", "tensorflow.random_shuffle", "numpy.where", "tensorflow.add_n", "numpy.divide", "numpy.random.randint", "tensorflow.boolean_mask", "numpy.hstack", "tensorflow.Variable", "numpy.reshape", "numpy.fliplr", "numpy.arange", "tensorflow.squeeze", "numpy.stack", "tensorflow.divide", "tensorflow.stop_gradient", "tensorflow.gather", "numpy.copy", "numpy.argmax", "tensorflow.nn.top_k", "tensorflow.argmax", "numpy.zeros", "tensorflow.tile", "tensorflow.image.resize_bilinear", "numpy.log", "tensorflow.gather_nd", "tensorflow.unique", "tensorflow.shape", "numpy.random.choice", "tensorflow.identity", "tensorflow.exp", "tensorflow.sparse_tensor_to_dense", "numpy.delete", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.split", "tensorflow.round", "numpy.array", "numpy.sum", "tensorflow.size", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "numpy.abs", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.sort", "numpy.ones", "numpy.random.shuffle", "tensorflow.log", "numpy.broadcast_to", "tensorflow.sqrt", "numpy.empty", "tensorflow.logical_and" ] ]
HumaticsLAB/AttentionBasedMultiModalRNN
[ "0c060a97cdddf1348938a5f2d456e83e5f8bf887" ]
[ "config.py" ]
[ "import torch\n\nDEVICE = torch.device('cuda:0')\nDATASET_PATH = \"dataset/images\"\nTRAIN_DATASET = \"dataset/train.csv\"\nTEST_DATASET = \"dataset/test.csv\"\nCOMPOSED_GTREND = \"dataset/gtrends.csv\"\nCATEG_DICT = \"category_labels.pt\"\nCOLOR_DICT = \"color_labels.pt\"\nFAB_DICT = \"fabric_labels.pt\"\nNUM_EPOCHS = 50\nUSE_TEACHERFORCING = True\nTF_RATE = 0.5\nLEARNING_RATE = 0.0001\nNORMALIZATION_VALUES_PATH = \"dataset/normalization_scale.npy\"\nBATCH_SIZE = 128\nSHOW_PLOTS = False\nNUM_WORKERS = 8\nUSE_EXOG = True\nEXOG_NUM = 3\nEXOG_LEN = 52\nHIDDEN_SIZE = 300\nSAVED_FEATURES_PATH = \"incv3_features\"\nUSE_SAVED_FEATURES = False\nNORM = False\nmodel_types = [\"image\", \"concat\", \"residual\", \"cross\"]\nMODEL = 1\n" ]
[ [ "torch.device" ] ]
PauloCirino/deep-learning-coursera
[ "69a89206bf4b0ec3148a1b69a2b31fb79e6adc7c" ]
[ "Neural-Networks-and-Deep-Learning/Logistic Regression as a Neural Network/Logistic Regression with a Neural Network mindset.py" ]
[ "\n# coding: utf-8\n\n# # Logistic Regression with a Neural Network mindset\n# \n# Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.\n# \n# **Instructions:**\n# - Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.\n# \n# **You will learn to:**\n# - Build the general architecture of a learning algorithm, including:\n# - Initializing parameters\n# - Calculating the cost function and its gradient\n# - Using an optimization algorithm (gradient descent) \n# - Gather all three functions above into a main model function, in the right order.\n\n# ## 1 - Packages ##\n# \n# First, let's run the cell below to import all the packages that you will need during this assignment. \n# - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n# - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.\n# - [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.\n# - [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.\n\n# In[1]:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\nget_ipython().magic('matplotlib inline')\n\n\n# ## 2 - Overview of the Problem set ##\n# \n# **Problem Statement**: You are given a dataset (\"data.h5\") containing:\n# - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\n# - a test set of m_test images labeled as cat or non-cat\n# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).\n# \n# You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.\n# \n# Let's get more familiar with the dataset. Load the data by running the following code.\n\n# In[2]:\n\n# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\n\n# We added \"_orig\" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\n# \n# Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. \n\n# In[3]:\n\n# Example of a picture\nindex = 25\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")\n\n\n# Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. \n# \n# **Exercise:** Find the values for:\n# - m_train (number of training examples)\n# - m_test (number of test examples)\n# - num_px (= height = width of a training image)\n# Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). 
For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.\n\n# In[4]:\n\n### START CODE HERE ### (≈ 3 lines of code)\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_x_orig.shape[0]\nnum_px = train_set_x_orig.shape[1]\n### END CODE HERE ###\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\n\n\n# **Expected Output for m_train, m_test and num_px**: \n# <table style=\"width:15%\">\n# <tr>\n# <td>**m_train**</td>\n# <td> 209 </td> \n# </tr>\n# \n# <tr>\n# <td>**m_test**</td>\n# <td> 50 </td> \n# </tr>\n# \n# <tr>\n# <td>**num_px**</td>\n# <td> 64 </td> \n# </tr>\n# \n# </table>\n# \n\n# For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.\n# \n# **Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\\_px $*$ num\\_px $*$ 3, 1).\n# \n# A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: \n# ```python\n# X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X\n# ```\n\n# In[5]:\n\n# Reshape the training and test examples\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n### END CODE HERE ###\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))\n\n\n# **Expected Output**: \n# \n# <table style=\"width:35%\">\n# <tr>\n# <td>**train_set_x_flatten shape**</td>\n# <td> (12288, 209)</td> \n# </tr>\n# <tr>\n# <td>**train_set_y shape**</td>\n# <td>(1, 209)</td> \n# </tr>\n# <tr>\n# <td>**test_set_x_flatten shape**</td>\n# <td>(12288, 50)</td> \n# </tr>\n# <tr>\n# <td>**test_set_y shape**</td>\n# <td>(1, 50)</td> \n# </tr>\n# <tr>\n# <td>**sanity check after reshaping**</td>\n# <td>[17 31 56 22 33]</td> \n# </tr>\n# </table>\n\n# To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\n# \n# One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. 
But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\n# \n# <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> \n# \n# Let's standardize our dataset.\n\n# In[6]:\n\ntrain_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.\n\n\n# <font color='blue'>\n# **What you need to remember:**\n# \n# Common steps for pre-processing a new dataset are:\n# - Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\n# - Reshape the datasets such that each example is now a vector of size (num_px \\* num_px \\* 3, 1)\n# - \"Standardize\" the data\n\n# ## 3 - General Architecture of the learning algorithm ##\n# \n# It's time to design a simple algorithm to distinguish cat images from non-cat images.\n# \n# You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**\n# \n# <img src=\"images/LogReg_kiank.png\" style=\"width:650px;height:400px;\">\n# \n# **Mathematical expression of the algorithm**:\n# \n# For one example $x^{(i)}$:\n# $$z^{(i)} = w^T x^{(i)} + b \\tag{1}$$\n# $$\\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\\tag{2}$$ \n# $$ \\mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \\log(a^{(i)}) - (1-y^{(i)} ) \\log(1-a^{(i)})\\tag{3}$$\n# \n# The cost is then computed by summing over all training examples:\n# $$ J = \\frac{1}{m} \\sum_{i=1}^m \\mathcal{L}(a^{(i)}, y^{(i)})\\tag{6}$$\n# \n# **Key steps**:\n# In this exercise, you will carry out the following steps: \n# - Initialize the parameters of the model\n# - Learn the parameters for the model by minimizing the cost \n# - Use the learned parameters to make predictions (on the test set)\n# - Analyse the results and conclude\n\n# ## 4 - Building the parts of our algorithm ## \n# \n# The main steps for building a Neural Network are:\n# 1. Define the model structure (such as number of input features) \n# 2. Initialize the model's parameters\n# 3. Loop:\n# - Calculate current loss (forward propagation)\n# - Calculate current gradient (backward propagation)\n# - Update parameters (gradient descent)\n# \n# You often build 1-3 separately and integrate them into one function we call `model()`.\n# \n# ### 4.1 - Helper functions\n# \n# **Exercise**: Using your code from \"Python Basics\", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \\frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. 
Use np.exp().\n\n# In[7]:\n\n# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1.0 / (1.0 + np.exp(-z))\n ### END CODE HERE ###\n \n return s\n\n\n# In[8]:\n\nprint (\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))\n\n\n# **Expected Output**: \n# \n# <table>\n# <tr>\n# <td>**sigmoid([0, 2])**</td>\n# <td> [ 0.5 0.88079708]</td> \n# </tr>\n# </table>\n\n# ### 4.2 - Initializing parameters\n# \n# **Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.\n\n# In[9]:\n\n# GRADED FUNCTION: initialize_with_zeros\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n \n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n \n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n w = np.zeros(shape = (dim, 1) )\n b = 0.0\n ### END CODE HERE ###\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b\n\n\n# In[10]:\n\ndim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))\n\n\n# **Expected Output**: \n# \n# \n# <table style=\"width:15%\">\n# <tr>\n# <td> ** w ** </td>\n# <td> [[ 0.]\n# [ 0.]] </td>\n# </tr>\n# <tr>\n# <td> ** b ** </td>\n# <td> 0 </td>\n# </tr>\n# </table>\n# \n# For image inputs, w will be of shape (num_px $\\times$ num_px $\\times$ 3, 1).\n\n# ### 4.3 - Forward and Backward propagation\n# \n# Now that your parameters are initialized, you can do the \"forward\" and \"backward\" propagation steps for learning the parameters.\n# \n# **Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.\n# \n# **Hints**:\n# \n# Forward Propagation:\n# - You get X\n# - You compute $A = \\sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$\n# - You calculate the cost function: $J = -\\frac{1}{m}\\sum_{i=1}^{m}y^{(i)}\\log(a^{(i)})+(1-y^{(i)})\\log(1-a^{(i)})$\n# \n# Here are the two formulas you will be using: \n# \n# $$ \\frac{\\partial J}{\\partial w} = \\frac{1}{m}X(A-Y)^T\\tag{7}$$\n# $$ \\frac{\\partial J}{\\partial b} = \\frac{1}{m} \\sum_{i=1}^m (a^{(i)}-y^{(i)})\\tag{8}$$\n\n# In[11]:\n\n# GRADED FUNCTION: propagate\n\ndef propagate(w, b, X, Y):\n \"\"\"\n Implement the cost function and its gradient for the propagation explained above\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\n\n Return:\n cost -- negative log-likelihood cost for logistic regression\n dw -- gradient of the loss with respect to w, thus same shape as w\n db -- gradient of the loss with respect to b, thus same shape as b\n \n Tips:\n - Write your code step by step for the propagation. 
np.log(), np.dot()\n \"\"\"\n \n m = X.shape[1]\n \n # FORWARD PROPAGATION (FROM X TO COST)\n ### START CODE HERE ### (≈ 2 lines of code)\n A = sigmoid(np.dot(w.T, X) + b) # compute activation\n cost = (-1 / m)*np.sum(Y * np.log(A) + (1 - Y)*np.log(1-A) ) # compute cost\n ### END CODE HERE ###\n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n ### START CODE HERE ### (≈ 2 lines of code)\n dw = (1/m) * np.dot( X, (A.T - Y.T) )\n db = (1/m) * np.sum(A - Y)\n ### END CODE HERE ###\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost\n\n\n# In[12]:\n\nw, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))\n\n\n# **Expected Output**:\n# \n# <table style=\"width:50%\">\n# <tr>\n# <td> ** dw ** </td>\n# <td> [[ 0.99845601]\n# [ 2.39507239]]</td>\n# </tr>\n# <tr>\n# <td> ** db ** </td>\n# <td> 0.00145557813678 </td>\n# </tr>\n# <tr>\n# <td> ** cost ** </td>\n# <td> 5.801545319394553 </td>\n# </tr>\n# \n# </table>\n\n# ### 4.4 - Optimization\n# - You have initialized your parameters.\n# - You are also able to compute a cost function and its gradient.\n# - Now, you want to update the parameters using gradient descent.\n# \n# **Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\\theta$, the update rule is $ \\theta = \\theta - \\alpha \\text{ } d\\theta$, where $\\alpha$ is the learning rate.\n\n# In[13]:\n\n# GRADED FUNCTION: optimize\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \"\"\"\n This function optimizes w and b by running a gradient descent algorithm\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- True to print the loss every 100 steps\n \n Returns:\n params -- dictionary containing the weights w and bias b\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\n \n Tips:\n You basically need to write down two steps and iterate through them:\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\n 2) Update the parameters using gradient descent rule for w and b.\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs\n\n\n# In[14]:\n\nparams, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\n\n\n# **Expected Output**: \n# \n# <table style=\"width:40%\">\n# <tr>\n# <td> **w** </td>\n# <td>[[ 0.19033591]\n# [ 0.12259159]] </td>\n# </tr>\n# \n# <tr>\n# <td> **b** </td>\n# <td> 1.92535983008 </td>\n# </tr>\n# <tr>\n# <td> **dw** </td>\n# <td> [[ 0.67752042]\n# [ 1.41625495]] </td>\n# </tr>\n# <tr>\n# <td> **db** </td>\n# <td> 0.219194504541 </td>\n# </tr>\n# \n# </table>\n\n# **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:\n# \n# 1. Calculate $\\hat{Y} = A = \\sigma(w^T X + b)$\n# \n# 2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). \n\n# In[15]:\n\n# GRADED FUNCTION: predict\n\ndef predict(w, b, X):\n '''\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n \n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n '''\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n ### START CODE HERE ### (≈ 1 line of code)\n A = sigmoid(np.dot(w.T, X) + b)\n ### END CODE HERE ###\n \n Y_prediction = np.round(A) ## Vectorized\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction\n\n\n# In[16]:\n\nw = np.array([[0.1124579],[0.23106775]])\nb = -0.3\nX = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])\nprint (\"predictions = \" + str(predict(w, b, X)))\n\n\n# **Expected Output**: \n# \n# <table style=\"width:30%\">\n# <tr>\n# <td>\n# **predictions**\n# </td>\n# <td>\n# [[ 1. 1. 
0.]]\n# </td> \n# </tr>\n# \n# </table>\n# \n\n# <font color='blue'>\n# **What to remember:**\n# You've implemented several functions that:\n# - Initialize (w,b)\n# - Optimize the loss iteratively to learn parameters (w,b):\n# - computing the cost and its gradient \n# - updating the parameters using gradient descent\n# - Use the learned (w,b) to predict the labels for a given set of examples\n\n# ## 5 - Merge all functions into a model ##\n# \n# You will now see how the overall model is structured by putting all the building blocks (functions implemented in the previous parts) together, in the right order.\n# \n# **Exercise:** Implement the model function. Use the following notation:\n# - Y_prediction_test for your predictions on the test set\n# - Y_prediction_train for your predictions on the train set\n# - w, costs, grads for the outputs of optimize()\n\n# In[22]:\n\nX_train, Y_train, X_test, Y_test = (train_set_x, train_set_y, test_set_x, test_set_y)\nnum_iterations = 2000\nlearning_rate = 0.5\nprint_cost = False\n\nw, b = initialize_with_zeros(dim = X_train.shape[0])\n\nprint('w.shape =', w.shape)\nprint('X_train.shape =', X_train.shape)\nprint('Y_train.shape =', Y_train.shape)\n\n\n# In[23]:\n\n# GRADED FUNCTION: model\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n \"\"\"\n Builds the logistic regression model by calling the functions you've implemented previously\n \n Arguments:\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\n print_cost -- Set to true to print the cost every 100 iterations\n \n Returns:\n d -- dictionary containing information about the model.\n \"\"\"\n \n ### START CODE HERE ###\n \n # initialize parameters with zeros (≈ 1 line of code)\n w, b = initialize_with_zeros(dim = X_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n # print_cost is forwarded so that passing print_cost=True actually prints the cost\n parameters, grads, costs = optimize(b = b,\n w = w,\n X = X_train,\n Y = Y_train, \n learning_rate = learning_rate,\n num_iterations = num_iterations,\n print_cost = print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples (≈ 2 lines of code)\n Y_prediction_test = predict(w = w, b = b, X = X_test)\n Y_prediction_train = predict(w = w, b = b, X = X_train)\n\n ### END CODE HERE ###\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d\n\n\n# Run the following cell to train your model.\n\n# In[24]:\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n\n\n# **Expected Output**: \n# \n# <table 
style=\"width:40%\"> \n# \n# <tr>\n# <td> **Cost after iteration 0 ** </td> \n# <td> 0.693147 </td>\n# </tr>\n# <tr>\n# <td> <center> $\\vdots$ </center> </td> \n# <td> <center> $\\vdots$ </center> </td> \n# </tr> \n# <tr>\n# <td> **Train Accuracy** </td> \n# <td> 99.04306220095694 % </td>\n# </tr>\n# \n# <tr>\n# <td>**Test Accuracy** </td> \n# <td> 70.0 % </td>\n# </tr>\n# </table> \n# \n# \n# \n\n# **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!\n# \n# Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.\n\n# In[25]:\n\n# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[int(d[\"Y_prediction_test\"][0,index])].decode(\"utf-8\") + \"\\\" picture.\")\n\n\n# Let's also plot the cost function and the gradients.\n\n# In[26]:\n\n# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()\n\n\n# **Interpretation**:\n# You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. \n\n# ## 6 - Further analysis (optional/ungraded exercise) ##\n# \n# Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\\alpha$. \n\n# #### Choice of learning rate ####\n# \n# **Reminder**:\n# In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.\n# \n# Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. 
\n\n# In[27]:\n\nlearning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations (hundreds)')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()\n\n\n# **Interpretation**: \n# - Different learning rates give different costs and thus different predictions results.\n# - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). \n# - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.\n# - In deep learning, we usually recommend that you: \n# - Choose the learning rate that better minimizes the cost function.\n# - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) \n# \n\n# ## 7 - Test with your own image (optional/ungraded exercise) ##\n# \n# Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:\n# 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n# 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n# 3. Change your image's name in the following code\n# 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!\n\n# In[29]:\n\n## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"cat.jpg\" # change this to the name of your image file \n## END CODE HERE ##\n\n# We preprocess the image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")\n\n\n# <font color='blue'>\n# **What to remember from this assignment:**\n# 1. Preprocessing the dataset is important.\n# 2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().\n# 3. Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big difference to the algorithm. You will see more examples of this later in this course!\n\n# Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. 
Once you submit, things you can play with include:\n# - Play with the learning rate and the number of iterations\n# - Try different initialization methods and compare the results\n# - Test other preprocessings (center the data, or divide each row by its standard deviation)\n\n# Bibliography:\n# - http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/\n# - https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "numpy.dot", "scipy.misc.imresize", "scipy.ndimage.imread", "numpy.log", "numpy.abs", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.round", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
mtsolmn/lantz-drivers
[ "f48caf9000ddd08f2abb837d832e341410af4788" ]
[ "lantz/drivers/microsoft/usbcam.py" ]
[ "import cv2\nimport numpy as np\nfrom lantz.core import Action, Driver, Feat\n\n\nclass USBCam(Driver):\n\n def __init__(self, device_id):\n self.device_id = device_id\n self._flipud = False\n self._fliplr = False\n self._rotation = 0\n return\n\n def initialize(self):\n self.capture = cv2.VideoCapture(self.device_id)\n return\n\n def finalize(self):\n self.capture.release()\n return\n\n @Feat(values={0, 90, 180, 270})\n def rotation(self):\n return self._rotation\n\n @rotation.setter\n def rotation(self, value):\n self._rotation = value\n return\n\n @Feat(values={True, False})\n def flipud(self):\n return self._flipud\n\n @flipud.setter\n def flipud(self, value):\n self._flipud = value\n return\n\n @Feat(values={True, False})\n def fliplr(self):\n return self._fliplr\n\n @fliplr.setter\n def fliplr(self, value):\n self._fliplr = value\n return\n\n @Action()\n def get_frame(self):\n img = self.capture.read()[1]\n array = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if self._flipud:\n array = np.flipud(array)\n if self._fliplr:\n array = np.fliplr(array)\n array = np.rot90(array, k=int(self._rotation / 90))\n return array\n" ]
[ [ "numpy.fliplr", "numpy.flipud" ] ]
uniLee1119/Final-NeuralFBProphet
[ "40caac12a1805da6a061452ea5571b48d6f2bb8f" ]
[ "src/Final-NeuralFBProphet/prop_two_optim.py" ]
[ "import argparse\n\nimport joblib\nimport numpy as np\nimport optuna\nfrom fbprophet import Prophet\nfrom optuna import Trial\nfrom optuna.samplers import TPESampler\nfrom sklearn.metrics import mean_squared_error\n\nfrom data.dataset import two_seconds_dataset\n\nparse = argparse.ArgumentParser(\"Optimize\")\nparse.add_argument(\"--path\", type=str, default=\"../../input/\")\nparse.add_argument(\"--trials\", type=int, default=20)\nparse.add_argument(\"--params\", type=str, default=\"two_second_params.pkl\")\nargs = parse.parse_args()\n\ndf, train, valid = two_seconds_dataset(args.path)\ntrain = df[df[\"ds\"] < \"2021-2-10\"]\nvalid = df[df[\"ds\"] >= \"2021-2-10\"]\nvalid[\"days\"] = valid[\"ds\"].apply(lambda x: x.day)\nvalid[\"hour\"] = valid[\"ds\"].apply(lambda x: x.hour)\nvalid[\"days_hour\"] = valid[\"days\"].astype(str) + \"_\" + valid[\"hour\"].astype(str)\nvalid = valid.groupby(\"days_hour\")[\"y\"].agg(\"mean\").reset_index()\ncap = np.max(train.y)\nfloor = np.min(train.y)\n\n\ndef objective(trial: Trial) -> float:\n params = {\n \"changepoint_range\": trial.suggest_discrete_uniform(\n \"changepoint_range\", 0.8, 0.95, 0.001\n ),\n \"n_changepoints\": trial.suggest_int(\"n_changepoints\", 20, 35),\n \"changepoint_prior_scale\": trial.suggest_discrete_uniform(\n \"changepoint_prior_scale\", 0.001, 0.5, 0.001\n ),\n \"seasonality_prior_scale\": trial.suggest_discrete_uniform(\n \"seasonality_prior_scale\", 1, 25, 0.5\n ),\n \"growth\": \"logistic\",\n \"seasonality_mode\": \"additive\",\n \"yearly_seasonality\": False,\n \"weekly_seasonality\": True,\n \"daily_seasonality\": True,\n }\n # fit_model\n m = Prophet(**params)\n train[\"cap\"] = cap\n train[\"floor\"] = floor\n m.fit(train)\n future = m.make_future_dataframe(periods=163, freq=\"H\")\n\n future[\"cap\"] = cap\n future[\"floor\"] = floor\n\n forecast = m.predict(future)\n valid_forecast = forecast.tail(163)\n val_rmse = mean_squared_error(valid.y, valid_forecast.yhat, squared=False)\n\n return val_rmse\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(\n study_name=\"ontune hyperparameter\",\n direction=\"minimize\",\n sampler=TPESampler(seed=42),\n )\n study.optimize(objective, n_trials=args.trials)\n prophet_params = study.best_params\n prophet_params[\"growth\"] = \"logistic\"\n prophet_params[\"seasonality_mode\"] = \"additive\"\n prophet_params[\"weekly_seasonality\"] = True\n prophet_params[\"daily_seasonality\"] = True\n prophet_params[\"yearly_seasonality\"] = False\n joblib.dump(prophet_params, \"../../parameters/\" + args.params)\n" ]
[ [ "sklearn.metrics.mean_squared_error", "numpy.max", "numpy.min" ] ]
millerda/seaborn
[ "5a67fa98ed4efa5b3761f2d9d184fb8addfac6de" ]
[ "seaborn/rcmod.py" ]
[ "\"\"\"Control plot style and scaling using the matplotlib rcParams interface.\"\"\"\nimport warnings\nimport functools\nimport matplotlib as mpl\nfrom cycler import cycler\nfrom . import palettes\n\n\n__all__ = [\"set_theme\", \"set\", \"reset_defaults\", \"reset_orig\",\n \"axes_style\", \"set_style\", \"plotting_context\", \"set_context\",\n \"set_palette\"]\n\n\n_style_keys = [\n\n \"axes.facecolor\",\n \"axes.edgecolor\",\n \"axes.grid\",\n \"axes.axisbelow\",\n \"axes.labelcolor\",\n\n \"figure.facecolor\",\n\n \"grid.color\",\n \"grid.linestyle\",\n\n \"text.color\",\n\n \"xtick.color\",\n \"ytick.color\",\n \"xtick.direction\",\n \"ytick.direction\",\n \"lines.solid_capstyle\",\n\n \"patch.edgecolor\",\n \"patch.force_edgecolor\",\n\n \"image.cmap\",\n \"font.family\",\n \"font.sans-serif\",\n\n \"xtick.bottom\",\n \"xtick.top\",\n \"ytick.left\",\n \"ytick.right\",\n\n \"axes.spines.left\",\n \"axes.spines.bottom\",\n \"axes.spines.right\",\n \"axes.spines.top\",\n\n]\n\n_context_keys = [\n\n \"font.size\",\n \"axes.labelsize\",\n \"axes.titlesize\",\n \"xtick.labelsize\",\n \"ytick.labelsize\",\n \"legend.fontsize\",\n \"legend.title_fontsize\",\n\n \"axes.linewidth\",\n \"grid.linewidth\",\n \"lines.linewidth\",\n \"lines.markersize\",\n \"patch.linewidth\",\n\n \"xtick.major.width\",\n \"ytick.major.width\",\n \"xtick.minor.width\",\n \"ytick.minor.width\",\n\n \"xtick.major.size\",\n \"ytick.major.size\",\n \"xtick.minor.size\",\n \"ytick.minor.size\",\n\n]\n\n\ndef set_theme(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\n \"\"\"Set multiple theme parameters in one step.\n\n Each set of parameters can be set directly or temporarily, see the\n referenced functions below for more information.\n\n Parameters\n ----------\n context : string or dict\n Plotting context parameters, see :func:`plotting_context`.\n style : string or dict\n Axes style parameters, see :func:`axes_style`.\n palette : string or sequence\n Color palette, see :func:`color_palette`.\n font : string\n Font family, see matplotlib font manager.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n rc : dict or None\n Dictionary of rc parameter mappings to override the above.\n\n \"\"\"\n set_context(context, font_scale)\n set_style(style, rc={\"font.family\": font})\n set_palette(palette, color_codes=color_codes)\n if rc is not None:\n mpl.rcParams.update(rc)\n\n\ndef set(*args, **kwargs):\n \"\"\"Alias for :func:`set_theme`, which is the preferred interface.\"\"\"\n set_theme(*args, **kwargs)\n\n\ndef reset_defaults():\n \"\"\"Restore all RC params to default settings.\"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\ndef reset_orig():\n \"\"\"Restore all RC params to original settings (respects custom rc).\"\"\"\n from . 
import _orig_rc_params\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', mpl.cbook.MatplotlibDeprecationWarning)\n mpl.rcParams.update(_orig_rc_params)\n\n\ndef axes_style(style=None, rc=None):\n \"\"\"Return a parameter dict for the aesthetic style of the plots.\n\n This affects things like the color of the axes, whether a grid is\n enabled by default, and other aesthetic elements.\n\n This function returns an object that can be used in a ``with`` statement\n to temporarily change the style parameters.\n\n Parameters\n ----------\n style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured set.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n >>> st = axes_style(\"whitegrid\")\n\n >>> set_style(\"ticks\", {\"xtick.major.size\": 8, \"ytick.major.size\": 8})\n\n >>> import matplotlib.pyplot as plt\n >>> with axes_style(\"white\"):\n ... f, ax = plt.subplots()\n ... ax.plot(x, y) # doctest: +SKIP\n\n See Also\n --------\n set_style : set the matplotlib parameters for a seaborn theme\n plotting_context : return a parameter dict to to scale plot elements\n color_palette : define the color palette for a plot\n\n \"\"\"\n if style is None:\n style_dict = {k: mpl.rcParams[k] for k in _style_keys}\n\n elif isinstance(style, dict):\n style_dict = style\n\n else:\n styles = [\"white\", \"dark\", \"whitegrid\", \"darkgrid\", \"ticks\"]\n if style not in styles:\n raise ValueError(\"style must be one of %s\" % \", \".join(styles))\n\n # Define colors here\n dark_gray = \".15\"\n light_gray = \".8\"\n\n # Common parameters\n style_dict = {\n\n \"figure.facecolor\": \"white\",\n \"axes.labelcolor\": dark_gray,\n\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": dark_gray,\n \"ytick.color\": dark_gray,\n\n \"axes.axisbelow\": True,\n \"grid.linestyle\": \"-\",\n\n\n \"text.color\": dark_gray,\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\"Arial\", \"DejaVu Sans\", \"Liberation Sans\",\n \"Bitstream Vera Sans\", \"sans-serif\"],\n\n\n \"lines.solid_capstyle\": \"round\",\n \"patch.edgecolor\": \"w\",\n \"patch.force_edgecolor\": True,\n\n \"image.cmap\": \"rocket\",\n\n \"xtick.top\": False,\n \"ytick.right\": False,\n\n }\n\n # Set grid on or off\n if \"grid\" in style:\n style_dict.update({\n \"axes.grid\": True,\n })\n else:\n style_dict.update({\n \"axes.grid\": False,\n })\n\n # Set the color of the background, spines, and grids\n if style.startswith(\"dark\"):\n style_dict.update({\n\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"grid.color\": \"white\",\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style == \"whitegrid\":\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": light_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style in [\"white\", \"ticks\"]:\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": dark_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n # Show or hide the 
axes ticks\n if style == \"ticks\":\n style_dict.update({\n \"xtick.bottom\": True,\n \"ytick.left\": True,\n })\n else:\n style_dict.update({\n \"xtick.bottom\": False,\n \"ytick.left\": False,\n })\n\n # Remove entries that are not defined in the base list of valid keys\n # This lets us handle matplotlib <=/> 2.0\n style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _style_keys}\n style_dict.update(rc)\n\n # Wrap in an _AxesStyle object so this can be used in a with statement\n style_object = _AxesStyle(style_dict)\n\n return style_object\n\n\ndef set_style(style=None, rc=None):\n \"\"\"Set the aesthetic style of the plots.\n\n This affects things like the color of the axes, whether a grid is\n enabled by default, and other aesthetic elements.\n\n Parameters\n ----------\n style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured set.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n >>> set_style(\"whitegrid\")\n\n >>> set_style(\"ticks\", {\"xtick.major.size\": 8, \"ytick.major.size\": 8})\n\n See Also\n --------\n axes_style : return a dict of parameters or use in a ``with`` statement\n to temporarily set the style.\n set_context : set parameters to scale plot elements\n set_palette : set the default color palette for figures\n\n \"\"\"\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n\ndef plotting_context(context=None, font_scale=1, rc=None):\n \"\"\"Return a parameter dict to scale elements of the figure.\n\n This affects things like the size of the labels, lines, and other\n elements of the plot, but not the overall style. The base context\n is \"notebook\", and the other contexts are \"paper\", \"talk\", and \"poster\",\n which are version of the notebook parameters scaled by .8, 1.3, and 1.6,\n respectively.\n\n This function returns an object that can be used in a ``with`` statement\n to temporarily change the context parameters.\n\n Parameters\n ----------\n context : dict, None, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n >>> c = plotting_context(\"poster\")\n\n >>> c = plotting_context(\"notebook\", font_scale=1.5)\n\n >>> c = plotting_context(\"talk\", rc={\"lines.linewidth\": 2})\n\n >>> import matplotlib.pyplot as plt\n >>> with plotting_context(\"paper\"):\n ... f, ax = plt.subplots()\n ... 
ax.plot(x, y) # doctest: +SKIP\n\n See Also\n --------\n set_context : set the matplotlib parameters to scale plot elements\n axes_style : return a dict of parameters defining a figure style\n color_palette : define the color palette for a plot\n\n \"\"\"\n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(\"context must be in %s\" % \", \".join(contexts))\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n\ndef set_context(context=None, font_scale=1, rc=None):\n \"\"\"Set the plotting context parameters.\n\n This affects things like the size of the labels, lines, and other\n elements of the plot, but not the overall style. The base context\n is \"notebook\", and the other contexts are \"paper\", \"talk\", and \"poster\",\n which are version of the notebook parameters scaled by .8, 1.3, and 1.6,\n respectively.\n\n Parameters\n ----------\n context : dict, None, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. 
This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n >>> set_context(\"paper\")\n\n >>> set_context(\"talk\", font_scale=1.4)\n\n >>> set_context(\"talk\", rc={\"lines.linewidth\": 2})\n\n See Also\n --------\n plotting_context : return a dictionary of rc parameters, or use in\n a ``with`` statement to temporarily set the context.\n set_style : set the default parameters for figure style\n set_palette : set the default color palette for figures\n\n \"\"\"\n context_object = plotting_context(context, font_scale, rc)\n mpl.rcParams.update(context_object)\n\n\nclass _RCAesthetics(dict):\n def __enter__(self):\n rc = mpl.rcParams\n self._orig = {k: rc[k] for k in self._keys}\n self._set(self)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\nclass _AxesStyle(_RCAesthetics):\n \"\"\"Light wrapper on a dict to set style temporarily.\"\"\"\n _keys = _style_keys\n _set = staticmethod(set_style)\n\n\nclass _PlottingContext(_RCAesthetics):\n \"\"\"Light wrapper on a dict to set context temporarily.\"\"\"\n _keys = _context_keys\n _set = staticmethod(set_context)\n\n\ndef set_palette(palette, n_colors=None, desat=None, color_codes=False):\n \"\"\"Set the matplotlib color cycle using a seaborn palette.\n\n Parameters\n ----------\n palette : seaborn color paltte | matplotlib colormap | hls | husl\n Palette definition. Should be something that :func:`color_palette`\n can process.\n n_colors : int\n Number of colors in the cycle. The default number of colors will depend\n on the format of ``palette``, see the :func:`color_palette`\n documentation for more information.\n desat : float\n Proportion to desaturate each color by.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n\n Examples\n --------\n >>> set_palette(\"Reds\")\n\n >>> set_palette(\"Set1\", 8, .75)\n\n See Also\n --------\n color_palette : build a color palette or set the color cycle temporarily\n in a ``with`` statement.\n set_context : set parameters to scale plot elements\n set_style : set the default parameters for figure style\n\n \"\"\"\n colors = palettes.color_palette(palette, n_colors, desat)\n cyl = cycler('color', colors)\n mpl.rcParams['axes.prop_cycle'] = cyl\n mpl.rcParams[\"patch.facecolor\"] = colors[0]\n if color_codes:\n try:\n palettes.set_color_codes(palette)\n except (ValueError, TypeError):\n pass\n" ]
[ [ "matplotlib.rcParams.update" ] ]
brenobeirigo/vrplot
[ "e9ec5450940bd576f43c4ed07f17ae228dbd0eb8" ]
[ "vrplot/animated.py" ]
[ "################################################################################\n## PLOT ########################################################################\n################################################################################\n\n# ANIMATIONS\n#%matplotlib widget\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\nfrom vrplot import util, static\n\n\ndef show_solutions(solutions, coords, node_labels, fig=None, ax=None, figsize=(8,8), vehicle_route_colors = ['#e41a1c', '#377eb8','#4daf4a','#984ea3','#ff7f00', '#ffff33']):\n if ax is None or fig is None:\n figure, axis = plt.subplots(figsize=figsize)\n axis.set_xlim(0,1)\n axis.set_ylim(0,1)\n lim = 0,1,0,1\n else:\n lim = None\n figure, axis = fig, ax\n\n animator = animation.FuncAnimation(\n figure,\n static.draw_routes,\n solutions,\n fargs=(coords, node_labels, vehicle_route_colors, axis, lim),\n interval=100,\n repeat=False)\n plt.close()\n return HTML(animator.to_html5_video())\n\n\ndef construct_route(route, coords, node_labels, fig=None, ax=None, figsize=(8,8), route_color=\"red\"):\n \n if ax is None or fig is None:\n figure, axis = plt.subplots(figsize=figsize)\n axis.set_xlim(0,1)\n axis.set_ylim(0,1)\n else:\n figure, axis = fig, ax\n \n static.draw_nodes(coords, node_labels, axis) \n axis.set_title(static.cost_header(util.get_cost(route, coords)), pad=10)\n\n edges = list(zip(route[:-1], route[1:]))\n\n animator = animation.FuncAnimation(\n figure,\n static.draw_edge,\n edges,\n fargs=(coords, axis, route_color),\n interval=100,\n repeat=False)\n \n plt.close()\n return HTML(animator.to_html5_video())\n\n # If not closed, plot the first solution\n #plt.close()\n #return HTML(animator.to_html5_video())\n \n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.close" ] ]
Cryaaa/tribolium-clustering
[ "f5751ec8c007e95e8a9688d2d8e34508b04f0822" ]
[ "tribolium_clustering/data_visualisation/_plot_cvi_each_timepoint_3D.py" ]
[ "def plot_cvi_each_timepoint_3D(cvi_scores_concatenated, timepoints_list, cluster_numbers, cvi_name = '', timepoint_label = 'Timepoints'):\n '''Plots a 3D plot displaying timepoint indices, cluster numbers and their cluster validation index scores\n \n Parameters\n ----------\n cvi_scores_concatenated : CVI scores list\n CVI scores in the form [cvi-t1,...,cvi-tn]2,...,[cvi-t1,...,cvi-tn]k concatenated\n timepoints_list : list\n Timepoints used as a list (can also just be indices)\n cluster_numbers : list\n cluster numbers used sorted: [2,..,k]\n cvi_name : string\n label for the axis of the CVI score \n timepoint_label : string\n label for the timepoint axis\n \n\n '''\n \n import numpy as np\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n\n x = np.array(timepoints_list)\n y = np.array(cluster_numbers)\n z = cvi_scores_concatenated\n\n X, Y = np.meshgrid(x, y)\n Z = np.reshape(z, X.shape) # Z.shape must be equal to X.shape = Y.shape\n\n fig = plt.figure(figsize = (20,20))\n ax = fig.add_subplot(projection='3d')\n\n ax.plot_surface(X, Y, Z,cmap=cm.coolwarm)\n\n ax.set_xlabel(timepoint_label)\n ax.set_ylabel('Number of Clusters')\n ax.set_zlabel(cvi_name)\n\n ax.view_init(elev=20., azim=65)\n\n plt.show()\n\n" ]
[ [ "numpy.meshgrid", "numpy.reshape", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
beconstant/urnn
[ "7c74d0eff7181756c080cd44cce732cef2089242" ]
[ "utils/theano_complex_extension.py" ]
[ "import numpy as np\n\nfrom theano import tensor\n\n\n#------------------------------------------------------------------------------\n# Complex theano funcs\n\ndef frac(A):\n return A[0, :, :], A[1, :, :]\n\n\ndef skew_frac(A):\n return tensor.tril(A, -1) - tensor.tril(A, -1).T,\\\n tensor.triu(A, 0).T + tensor.triu(A, 1)\n\n\ndef skew_hermitian_parametrized(skew):\n skew_real, skew_imag = frac(skew)\n A = tensor.tril(skew_real, -1) + tensor.tril(skew_imag, 0).T\n #A = np.zeros_like(skew.shape, dtype=np.float)\n #A = tensor.tri(*skew.shape, -1) * skew.real + (np.tri(*skew.shape, 0) * skew.imag).T\n return A\n\n\ndef real(A):\n return A[0, :, :]\n\n\ndef imag(A):\n return A[1, :, :]\n\n\ndef zeros(shape):\n return tensor.zeros((2,) + shape)\n\n\ndef identity(n):\n return tensor.stack([tensor.eye(n), tensor.zeros((n, n))], axis=0)\n\n\ndef complex_dot(A, B):\n A_real, A_imag = frac(A)\n B_real, B_imag = frac(B)\n prod = tensor.zeros((2, A_real.shape[0], B_real.shape[1]))\n prod = tensor.set_subtensor(prod[0, :, :], A_real.dot(B_real) - A_imag.dot(B_imag))\n prod = tensor.set_subtensor(prod[1, :, :], A_real.dot(B_imag) + A_imag.dot(B_real))\n return prod\n\n\ndef complex_matrix_dot(*args):\n \"\"\" Shorthand for product between several dots.\n Given :math:`N` matrices :math:`A_0, A_1, .., A_N`, ``matrix_dot`` will\n generate the matrix product between all in the given order, namely\n :math:`A_0 \\cdot A_1 \\cdot A_2 \\cdot .. \\cdot A_N`.\n \"\"\"\n rval = args[0]\n for a in args[1:]:\n rval = complex_dot(rval, a)\n return rval\n\n\ndef transpose(X):\n if X.ndim - 1 > 2:\n raise ValueError(\"only matrix transpose is allowed, but X have dimension {}\".format(X.ndim - 1))\n return tensor.transpose(X, axes=(0, 2, 1))\n\n\ndef conj(X):\n X_conj = tensor.copy(X)\n tensor.set_subtensor(X[1, :, :], -1 * X[1, :, :])\n return X_conj\n\n\ndef hconj(X):\n X_hconj = tensor.transpose(X, axes=(0, 2, 1))\n X_hconj = tensor.set_subtensor(X_hconj[1, :, :], -1 * X_hconj[1, :, :])\n return X_hconj\n\n\ndef complex_reshape(x, shape, ndim=None):\n if ndim is not None:\n return x.reshape(tensor.concatenate([(2,), shape]), ndim + 1)\n return x.reshape((2,) + shape, ndim)\n\n\ndef complex_tensordot(a, b, axes=2):\n AR, AI = a[0, ...], a[1, ...]\n BR, BI = b[0, ...], b[1, ...]\n\n output = tensor.stack([\n tensor.tensordot(AR, BR, axes=axes) - tensor.tensordot(AI, BI, axes=axes),\n tensor.tensordot(AR, BI, axes=axes) + tensor.tensordot(AI, BR, axes=axes),\n ], axis=0)\n return output\n\n\ndef apply_complex_mat_to_kronecker(x, matrices):\n x = x.reshape((2, x.shape[1]) + tuple(mat.shape[1] for mat in matrices))\n result = x\n for mat in matrices:\n print(x.ndim)\n print(mat)\n result = complex_tensordot(result, mat, axes=([1], [0]))\n return result\n\n\n#------------------------------------------------------------------------------\n# Ordinary theano funcs\n\n\ndef apply_mat_to_kronecker(x, matrices):\n x = x.reshape((x.shape[0],) + tuple(mat.shape[0] for mat in matrices))\n result = x\n for mat in matrices:\n result = np.tensordot(result, mat, axes=([1], [0]))\n return result\n\n\n#------------------------------------------------------------------------------\n# Numpy funcs for unit tests\n\ndef np_apply_complex_mat_to_kronecker(x, matrices):\n x = x.reshape((2, x.shape[1]) + tuple(mat.shape[1] for mat in matrices))\n result = x\n for mat in matrices:\n result = np_complex_tensordot(result, mat, axes=([1], [0]))\n return result\n\n\ndef np_complex_tensordot(a, b, axes=2):\n AR, AI = a[0, ...], a[1, ...]\n BR, BI = 
b[0, ...], b[1, ...]\n\n output = np.stack([\n np.tensordot(AR, BR, axes=axes) - np.tensordot(AI, BI, axes=axes),\n np.tensordot(AR, BI, axes=axes) + np.tensordot(AI, BR, axes=axes),\n ], axis=0)\n return output" ]
[ [ "numpy.tensordot" ] ]
ryscet/pySeries
[ "3ab1e0a9dbdeaef34c6c6d1fed5b248203c84fea" ]
[ "pyseries/Pipelines/AnalyzeBinRIv.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 24 12:27:09 2016\n\n@author: user\n\"\"\"\n\nimport sys\nsys.path.insert(0, '/Users/user/Desktop/repo_for_pyseries/pyseries')\n\nimport pyseries.LoadingData as loading\nimport pyseries.Preprocessing as prep\nimport pyseries.Analysis as analysis\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom scipy import stats\nfrom scipy import signal\n\n\ndef plot_bin_riv():\n paths = [#'/Users/user/Desktop/nagrania_eeg/binriv/Kuba_14_06_16/',\n '/Users/user/Desktop/nagrania_eeg/binriv/Karen_14_06_16/',\n '/Users/user/Desktop/nagrania_eeg/binriv/Ania_14_06_16/'\n ]\n for path in paths:\n recording = loading.Read_edf.Combine_EDF_XML(path,0,70)\n \n \n f, Pxx_den = signal.welch(recording['EEG P4'], fs = 498, nperseg=512)\n \n plt.figure()\n plt.plot(f, Pxx_den)\n \n \n epochs_before_info = {\"response_changed\": [ 498*5, 0] }\n \n epochs_before = prep.Epochs.Make_Epochs_for_Channels(recording, ['EEG P4'], epochs_before_info)['EEG P4']\n \n epochs_after_info = {\"response_changed\": [0, 498*5] }\n \n epochs_after = prep.Epochs.Make_Epochs_for_Channels(recording, ['EEG P4'], epochs_after_info)['EEG P4']\n \n epochs = {}\n epochs['P4'] = {'before_switch':epochs_before['response_changed'], 'after_switch': epochs_after['response_changed']}\n \n power_density= analysis.Explore.PlotPowerSpectrum(epochs['P4'], exact_sr =498, mode = 'welch', name = path, freq_min = 0, freq_max = 100)\n \n \n #f, Pxx_den = signal.welch(event, exact_sr, nperseg=512)\n" ]
[ [ "matplotlib.pyplot.plot", "scipy.signal.welch", "matplotlib.pyplot.figure" ] ]
yzhq97/distortion-free-wide-angle.pytorch
[ "3d8899a84e5f5c4fca62385116bfdaf4876b2ff7" ]
[ "src/data.py" ]
[ "import cv2\nimport os\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom stereographic import get_uniform_stereo_mesh\nfrom perception import get_face_masks, get_object_masks\n\n\nclass ImageDataset(Dataset):\n\n def __init__(self, args, root='data'):\n\n self.Q = args.Q\n self.mesh_ds_ratio = args.mesh_ds_ratio\n self.data_list = []\n for names in os.listdir(root):\n if names.endswith(\".jpg\"):\n self.data_list.append(os.path.join(root, names))\n self.data_list = sorted(self.data_list)\n\n\n def get_image_by_file(self, file, classes=None):\n\n data_name = file\n fov = int(data_name.split('/')[-1].split('.')[0].split('_')[-1])\n\n image = cv2.imread(data_name)\n H, W, _ = image.shape\n\n Hm = H // self.mesh_ds_ratio\n Wm = W // self.mesh_ds_ratio\n\n if classes is None:\n seg_mask, box_masks = get_face_masks(image)\n else:\n seg_mask, box_masks = get_object_masks(image, classes=classes)\n\n seg_mask = cv2.resize(seg_mask.astype(np.float32), (Wm, Hm))\n box_masks = [cv2.resize(box_mask.astype(np.float32), (Wm, Hm)) for box_mask in box_masks]\n box_masks = np.stack(box_masks, axis=0)\n seg_mask_padded = np.pad(seg_mask, [[self.Q, self.Q], [self.Q, self.Q]], \"constant\")\n box_masks_padded = np.pad(box_masks, [[0, 0], [self.Q, self.Q], [self.Q, self.Q]], \"constant\")\n\n mesh_uniform_padded, mesh_stereo_padded = get_uniform_stereo_mesh(image, fov * np.pi / 180, self.Q, self.mesh_ds_ratio)\n\n radial_distance_padded = np.linalg.norm(mesh_uniform_padded, axis=0)\n half_diagonal = np.linalg.norm([H + 2 * self.Q * self.mesh_ds_ratio, W + 2 * self.Q * self.mesh_ds_ratio]) / 2.\n ra = half_diagonal / 2.\n rb = half_diagonal / (2 * np.log(99))\n correction_strength = 1 / (1 + np.exp(-(radial_distance_padded - ra) / rb))\n\n return image, mesh_uniform_padded, mesh_stereo_padded, correction_strength, seg_mask_padded, box_masks_padded\n\n\n def __getitem__(self, index):\n\n index = index % len(self.data_list)\n data_name = self.data_list[index]\n\n return self.get_image_by_file(data_name)\n\n\n def __len__(self):\n return len(self.data_list)" ]
[ [ "numpy.log", "numpy.pad", "numpy.linalg.norm", "numpy.stack", "numpy.exp" ] ]
pkan2/addons
[ "8fe50d7600a592b06984f1ead61fdd8adb008ad1" ]
[ "tensorflow_addons/layers/normalizations_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport sys\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.layers.normalizations import FilterResponseNormalization\nfrom tensorflow_addons.layers.normalizations import GroupNormalization\nfrom tensorflow_addons.layers.normalizations import InstanceNormalization\nfrom tensorflow_addons.utils import test_utils\n\n\n# ------------Tests to ensure proper inheritance. If these suceed you can\n# test for Instance norm by setting Groupnorm groups = -1\ndef test_inheritance():\n assert issubclass(InstanceNormalization, GroupNormalization)\n assert InstanceNormalization.build == GroupNormalization.build\n assert InstanceNormalization.call == GroupNormalization.call\n\n\ndef test_groups_after_init():\n layers = InstanceNormalization()\n assert layers.groups == -1\n\n\ndef test_weights():\n # Check if weights get initialized correctly\n layer = GroupNormalization(groups=1, scale=False, center=False)\n layer.build((None, 3, 4))\n assert len(layer.trainable_weights) == 0\n assert len(layer.weights) == 0\n\n layer = InstanceNormalization()\n layer.build((None, 3, 4))\n assert len(layer.trainable_weights) == 2\n assert len(layer.weights) == 2\n\n\ndef test_apply_normalization():\n input_shape = (1, 4)\n reshaped_inputs = tf.constant([[[2.0, 2.0], [3.0, 3.0]]])\n layer = GroupNormalization(groups=2, axis=1, scale=False, center=False)\n normalized_input = layer._apply_normalization(reshaped_inputs, input_shape)\n np.testing.assert_equal(normalized_input, np.array([[[0.0, 0.0], [0.0, 0.0]]]))\n\n\n@test_utils.run_all_in_graph_and_eager_modes\nclass NormalizationTest(tf.test.TestCase):\n def test_reshape(self):\n def run_reshape_test(axis, group, input_shape, expected_shape):\n group_layer = GroupNormalization(groups=group, axis=axis)\n group_layer._set_number_of_groups_for_instance_norm(input_shape)\n\n inputs = np.ones(input_shape)\n tensor_input_shape = tf.convert_to_tensor(input_shape)\n reshaped_inputs, group_shape = group_layer._reshape_into_groups(\n inputs, (10, 10, 10), tensor_input_shape\n )\n for i in range(len(expected_shape)):\n self.assertEqual(self.evaluate(group_shape[i]), expected_shape[i])\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 10, 5, 2]\n run_reshape_test(2, 5, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 2, 5, 10]\n run_reshape_test(1, 2, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 10, 1, 10]\n run_reshape_test(1, -1, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 1, 10, 10]\n run_reshape_test(1, 1, input_shape, expected_shape)\n\n def test_feature_input(self):\n shape = (10, 100)\n for center in [True, False]:\n for scale in [True, False]:\n for groups in [-1, 1, 2, 5]:\n self._test_random_shape_on_all_axis_except_batch(\n 
shape, groups, center, scale\n )\n\n def test_picture_input(self):\n shape = (10, 30, 30, 3)\n for center in [True, False]:\n for scale in [True, False]:\n for groups in [-1, 1, 3]:\n self._test_random_shape_on_all_axis_except_batch(\n shape, groups, center, scale\n )\n\n def _test_random_shape_on_all_axis_except_batch(self, shape, groups, center, scale):\n inputs = tf.random.normal(shape)\n for axis in range(1, len(shape)):\n self._test_specific_layer(inputs, axis, groups, center, scale)\n\n def _test_specific_layer(self, inputs, axis, groups, center, scale):\n\n input_shape = inputs.shape\n\n # Get Output from Keras model\n layer = GroupNormalization(axis=axis, groups=groups, center=center, scale=scale)\n model = tf.keras.models.Sequential()\n model.add(layer)\n outputs = model.predict(inputs, steps=1)\n self.assertFalse(np.isnan(outputs).any())\n\n # Create shapes\n if groups is -1:\n groups = input_shape[axis]\n np_inputs = self.evaluate(inputs)\n reshaped_dims = list(np_inputs.shape)\n reshaped_dims[axis] = reshaped_dims[axis] // groups\n reshaped_dims.insert(axis, groups)\n reshaped_inputs = np.reshape(np_inputs, tuple(reshaped_dims))\n\n group_reduction_axes = list(range(1, len(reshaped_dims)))\n axis = -2 if axis == -1 else axis - 1\n group_reduction_axes.pop(axis)\n\n # Calculate mean and variance\n mean = np.mean(reshaped_inputs, axis=tuple(group_reduction_axes), keepdims=True)\n variance = np.var(\n reshaped_inputs, axis=tuple(group_reduction_axes), keepdims=True\n )\n\n # Get gamma and beta initalized by layer\n gamma, beta = layer._get_reshaped_weights(input_shape)\n if gamma is None:\n gamma = 1.0\n if beta is None:\n beta = 0.0\n\n # Get ouput from Numpy\n zeroed = reshaped_inputs - mean\n rsqrt = 1 / np.sqrt(variance + 1e-5)\n output_test = gamma * zeroed * rsqrt + beta\n\n # compare outputs\n output_test = tf.reshape(output_test, input_shape)\n self.assertAlmostEqual(\n self.evaluate(tf.reduce_mean(output_test - outputs)), 0, places=7\n )\n\n def _create_and_fit_Sequential_model(self, layer, shape):\n # Helperfunction for quick evaluation\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n model.add(layer)\n model.add(tf.keras.layers.Dense(32))\n model.add(tf.keras.layers.Dense(1))\n\n model.compile(\n optimizer=tf.keras.optimizers.RMSprop(0.01), loss=\"categorical_crossentropy\"\n )\n layer_shape = (10,) + shape\n input_batch = np.random.rand(*layer_shape)\n output_batch = np.random.rand(*(10, 1))\n model.fit(x=input_batch, y=output_batch, epochs=1, batch_size=1)\n return model\n\n def test_axis_error(self):\n with self.assertRaises(ValueError):\n GroupNormalization(axis=0)\n\n def test_groupnorm_flat(self):\n # Check basic usage of groupnorm_flat\n # Testing for 1 == LayerNorm, 16 == GroupNorm, -1 == InstanceNorm\n\n groups = [-1, 16, 1]\n shape = (64,)\n for i in groups:\n model = self._create_and_fit_Sequential_model(\n GroupNormalization(groups=i), shape\n )\n self.assertTrue(hasattr(model.layers[0], \"gamma\"))\n self.assertTrue(hasattr(model.layers[0], \"beta\"))\n\n def test_instancenorm_flat(self):\n # Check basic usage of instancenorm\n model = self._create_and_fit_Sequential_model(InstanceNormalization(), (64,))\n self.assertTrue(hasattr(model.layers[0], \"gamma\"))\n self.assertTrue(hasattr(model.layers[0], \"beta\"))\n\n def test_initializer(self):\n # Check if the initializer for gamma and beta is working correctly\n layer = GroupNormalization(\n groups=32,\n beta_initializer=\"random_normal\",\n beta_constraint=\"NonNeg\",\n 
gamma_initializer=\"random_normal\",\n gamma_constraint=\"NonNeg\",\n )\n\n model = self._create_and_fit_Sequential_model(layer, (64,))\n\n weights = np.array(model.layers[0].get_weights())\n negativ = weights[weights < 0.0]\n self.assertTrue(len(negativ) == 0)\n\n def test_regularizations(self):\n layer = GroupNormalization(\n gamma_regularizer=\"l1\", beta_regularizer=\"l1\", groups=4, axis=2\n )\n layer.build((None, 4, 4))\n self.assertEqual(len(layer.losses), 2)\n max_norm = tf.keras.constraints.max_norm\n layer = GroupNormalization(gamma_constraint=max_norm, beta_constraint=max_norm)\n layer.build((None, 3, 4))\n self.assertEqual(layer.gamma.constraint, max_norm)\n self.assertEqual(layer.beta.constraint, max_norm)\n\n def test_groupnorm_conv(self):\n # Check if Axis is working for CONV nets\n # Testing for 1 == LayerNorm, 5 == GroupNorm, -1 == InstanceNorm\n np.random.seed(0x2020)\n groups = [-1, 5, 1]\n for i in groups:\n model = tf.keras.models.Sequential()\n model.add(GroupNormalization(axis=1, groups=i, input_shape=(20, 20, 3)))\n model.add(tf.keras.layers.Conv2D(5, (1, 1), padding=\"same\"))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(1, activation=\"softmax\"))\n model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01), loss=\"mse\")\n x = np.random.randint(1000, size=(10, 20, 20, 3))\n y = np.random.randint(1000, size=(10, 1))\n model.fit(x=x, y=y, epochs=1)\n self.assertTrue(hasattr(model.layers[0], \"gamma\"))\n\n def test_groupnorm_correctness_1d(self):\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(input_shape=(10,), groups=2)\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))\n model.fit(x, x, epochs=5, verbose=0)\n out = model.predict(x)\n out -= self.evaluate(norm.beta)\n out /= self.evaluate(norm.gamma)\n\n self.assertAllClose(out.mean(), 0.0, atol=1e-1)\n self.assertAllClose(out.std(), 1.0, atol=1e-1)\n\n def test_groupnorm_2d_different_groups(self):\n np.random.seed(0x2020)\n groups = [2, 1, 10]\n for i in groups:\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(axis=1, groups=i, input_shape=(10, 3))\n model.add(norm)\n # centered and variance are 5.0 and 10.0, respectively\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 3))\n model.fit(x, x, epochs=5, verbose=0)\n out = model.predict(x)\n out -= np.reshape(self.evaluate(norm.beta), (1, 10, 1))\n out /= np.reshape(self.evaluate(norm.gamma), (1, 10, 1))\n\n self.assertAllClose(\n out.mean(axis=(0, 1), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n self.assertAllClose(\n out.std(axis=(0, 1), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n def test_groupnorm_convnet(self):\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(axis=1, input_shape=(3, 4, 4), groups=3)\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n # centered = 5.0, variance = 10.0\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(x, x, epochs=4, verbose=0)\n out = model.predict(x)\n out -= np.reshape(self.evaluate(norm.beta), (1, 3, 1, 1))\n out /= np.reshape(self.evaluate(norm.gamma), (1, 3, 1, 1))\n\n self.assertAllClose(\n np.mean(out, axis=(0, 2, 3), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n self.assertAllClose(\n np.std(out, axis=(0, 2, 3), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n\[email 
protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_convnet_no_center_no_scale():\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(\n axis=-1, groups=2, center=False, scale=False, input_shape=(3, 4, 4)\n )\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n # centered and variance are 5.0 and 10.0, respectively\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(x, x, epochs=4, verbose=0)\n out = model.predict(x)\n\n np.testing.assert_allclose(\n np.mean(out, axis=(0, 2, 3), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n np.testing.assert_allclose(\n np.std(out, axis=(0, 2, 3), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n\ndef calculate_frn(\n x, beta=0.2, gamma=1, eps=1e-6, learned_epsilon=False, dtype=np.float32\n):\n if learned_epsilon:\n eps = eps + 1e-4\n eps = tf.cast(eps, dtype=dtype)\n nu2 = tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True)\n x = x * tf.math.rsqrt(nu2 + tf.abs(eps))\n return gamma * x + beta\n\n\ndef set_random_seed():\n seed = 0x2020\n np.random.seed(seed)\n tf.random.set_seed(seed)\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_beta(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(inputs, beta=1, gamma=1, dtype=dtype)\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_gamma(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=\"zeros\", gamma_initializer=\"ones\", dtype=dtype\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(inputs, beta=0, gamma=1, dtype=dtype)\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_epsilon(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=tf.keras.initializers.Constant(0.5),\n gamma_initializer=\"ones\",\n learned_epsilon=True,\n dtype=dtype,\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(\n inputs, beta=0.5, gamma=1, learned_epsilon=True, dtype=dtype\n )\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_keras_model(dtype):\n set_random_seed()\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n random_inputs = np.random.rand(10, 32, 32, 3).astype(dtype)\n random_labels = np.random.randint(2, size=(10,)).astype(dtype)\n input_layer = tf.keras.layers.Input(shape=(32, 32, 3))\n x = frn(input_layer)\n x = tf.keras.layers.Flatten()(x)\n out = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n model = tf.keras.models.Model(input_layer, out)\n 
model.compile(loss=\"binary_crossentropy\", optimizer=\"sgd\")\n model.fit(random_inputs, random_labels, epochs=2)\n\n\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_serialization(dtype):\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n serialized_frn = tf.keras.layers.serialize(frn)\n new_layer = tf.keras.layers.deserialize(serialized_frn)\n assert frn.get_config() == new_layer.get_config()\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_eps_gards(dtype):\n set_random_seed()\n random_inputs = np.random.rand(10, 32, 32, 3).astype(np.float32)\n random_labels = np.random.randint(2, size=(10,)).astype(np.float32)\n input_layer = tf.keras.layers.Input(shape=(32, 32, 3))\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", learned_epsilon=True\n )\n initial_eps_value = frn.eps_learned.numpy()[0]\n x = frn(input_layer)\n x = tf.keras.layers.Flatten()(x)\n out = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n model = tf.keras.models.Model(input_layer, out)\n model.compile(loss=\"binary_crossentropy\", optimizer=\"sgd\")\n model.fit(random_inputs, random_labels, epochs=1)\n final_eps_value = frn.eps_learned.numpy()[0]\n assert initial_eps_value != final_eps_value\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__]))\n" ]
[ [ "tensorflow.convert_to_tensor", "numpy.expand_dims", "numpy.sqrt", "tensorflow.cast", "numpy.mean", "tensorflow.random.set_seed", "numpy.random.randint", "tensorflow.keras.layers.deserialize", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.layers.Conv2D", "numpy.std", "tensorflow.square", "tensorflow.keras.layers.serialize", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.Model", "numpy.isnan", "tensorflow.keras.layers.Dense", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "tensorflow.keras.initializers.Constant", "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.reshape", "numpy.ones", "numpy.random.normal", "tensorflow.random.normal", "tensorflow.abs", "tensorflow.keras.layers.Input" ] ]
sosaucily/hummingbot
[ "082883319253399b2c7a321c709c97dcd84b9b72" ]
[ "hummingbot/client/command/config_command.py" ]
[ "import asyncio\nfrom typing import (\n List,\n Any,\n)\nfrom decimal import Decimal\nimport pandas as pd\nfrom os.path import join\nfrom hummingbot.client.settings import (\n GLOBAL_CONFIG_PATH,\n CONF_FILE_PATH,\n)\nfrom hummingbot.client.config.global_config_map import global_config_map\nfrom hummingbot.client.config.config_validators import validate_bool\nfrom hummingbot.client.config.config_helpers import (\n missing_required_configs,\n save_to_yml\n)\nfrom hummingbot.client.config.security import Security\nfrom hummingbot.client.config.config_var import ConfigVar\nfrom hummingbot.core.utils.async_utils import safe_ensure_future\nfrom hummingbot.strategy.pure_market_making import (\n PureMarketMakingStrategy\n)\nfrom hummingbot.user.user_balances import UserBalances\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from hummingbot.client.hummingbot_application import HummingbotApplication\n\n\nno_restart_pmm_keys_in_percentage = [\"bid_spread\", \"ask_spread\", \"order_level_spread\", \"inventory_target_base_pct\"]\nno_restart_pmm_keys = [\"order_amount\", \"order_levels\", \"filled_order_delay\", \"inventory_skew_enabled\", \"inventory_range_multiplier\"]\nglobal_configs_to_display = [\"0x_active_cancels\",\n \"kill_switch_enabled\",\n \"kill_switch_rate\",\n \"telegram_enabled\",\n \"telegram_token\",\n \"telegram_chat_id\",\n \"send_error_logs\",\n \"script_enabled\",\n \"script_file_path\",\n \"manual_gas_price\",\n \"ethereum_chain_name\",\n \"ethgasstation_gas_enabled\",\n \"ethgasstation_api_key\",\n \"ethgasstation_gas_level\",\n \"ethgasstation_refresh_time\",\n \"gateway_enabled\",\n \"gateway_cert_passphrase\",\n \"gateway_api_host\",\n \"gateway_api_port\"]\n\n\nclass ConfigCommand:\n def config(self, # type: HummingbotApplication\n key: str = None,\n value: str = None):\n self.app.clear_input()\n if key is None:\n self.list_configs()\n return\n else:\n if key not in self.config_able_keys():\n self._notify(\"Invalid key, please choose from the list.\")\n return\n safe_ensure_future(self._config_single_key(key, value), loop=self.ev_loop)\n\n def list_configs(self, # type: HummingbotApplication\n ):\n columns = [\"Key\", \" Value\"]\n data = [[cv.key, cv.value] for cv in global_config_map.values()\n if cv.key in global_configs_to_display and not cv.is_secure]\n df = pd.DataFrame(data=data, columns=columns)\n self._notify(\"\\nGlobal Configurations:\")\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n\n if self.strategy_name is not None:\n data = [[cv.key, cv.value] for cv in self.strategy_config_map.values() if not cv.is_secure]\n df = pd.DataFrame(data=data, columns=columns)\n self._notify(\"\\nStrategy Configurations:\")\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n\n def config_able_keys(self # type: HummingbotApplication\n ) -> List[str]:\n \"\"\"\n Returns a list of configurable keys - using config command, excluding exchanges api keys\n as they are set from connect command.\n \"\"\"\n keys = [c.key for c in global_config_map.values() if c.prompt is not None and not c.is_connect_key]\n if self.strategy_config_map is not None:\n keys += [c.key for c in self.strategy_config_map.values() if c.prompt is not None]\n return keys\n\n async def check_password(self, # type: HummingbotApplication\n ):\n password = await self.app.prompt(prompt=\"Enter your password >>> \", is_password=True)\n if password != Security.password:\n 
self._notify(\"Invalid password, please try again.\")\n return False\n else:\n return True\n\n # Make this function static so unit testing can be performed.\n @staticmethod\n def update_running_pure_mm(pure_mm_strategy: PureMarketMakingStrategy, key: str, new_value: Any):\n if key in no_restart_pmm_keys_in_percentage:\n setattr(pure_mm_strategy, key, new_value / Decimal(\"100\"))\n return True\n elif key in no_restart_pmm_keys:\n setattr(pure_mm_strategy, key, new_value)\n return True\n return False\n\n async def _config_single_key(self, # type: HummingbotApplication\n key: str,\n input_value):\n \"\"\"\n Configure a single variable only.\n Prompt the user to finish all configurations if there are remaining empty configs at the end.\n \"\"\"\n\n self.placeholder_mode = True\n self.app.hide_input = True\n\n try:\n config_var, config_map, file_path = None, None, None\n if key in global_config_map:\n config_map = global_config_map\n file_path = GLOBAL_CONFIG_PATH\n elif self.strategy_config_map is not None and key in self.strategy_config_map:\n config_map = self.strategy_config_map\n file_path = join(CONF_FILE_PATH, self.strategy_file_name)\n config_var = config_map[key]\n if input_value is None:\n self._notify(\"Please follow the prompt to complete configurations: \")\n if config_var.key == \"inventory_target_base_pct\":\n await self.asset_ratio_maintenance_prompt(config_map, input_value)\n else:\n await self.prompt_a_config(config_var, input_value=input_value, assign_default=False)\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n await self.update_all_secure_configs()\n missings = missing_required_configs(config_map)\n if missings:\n self._notify(\"\\nThere are other configuration required, please follow the prompt to complete them.\")\n missings = await self._prompt_missing_configs(config_map)\n save_to_yml(file_path, config_map)\n self._notify(\"\\nNew configuration saved:\")\n self._notify(f\"{key}: {str(config_var.value)}\")\n for config in missings:\n self._notify(f\"{config.key}: {str(config.value)}\")\n if isinstance(self.strategy, PureMarketMakingStrategy):\n updated = ConfigCommand.update_running_pure_mm(self.strategy, key, config_var.value)\n if updated:\n self._notify(f\"\\nThe current {self.strategy_name} strategy has been updated \"\n f\"to reflect the new configuration.\")\n except asyncio.TimeoutError:\n self.logger().error(\"Prompt timeout\")\n except Exception as err:\n self.logger().error(str(err), exc_info=True)\n finally:\n self.app.hide_input = False\n self.placeholder_mode = False\n self.app.change_prompt(prompt=\">>> \")\n\n async def _prompt_missing_configs(self, # type: HummingbotApplication\n config_map):\n missings = missing_required_configs(config_map)\n for config in missings:\n await self.prompt_a_config(config)\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n if missing_required_configs(config_map):\n return missings + (await self._prompt_missing_configs(config_map))\n return missings\n\n async def asset_ratio_maintenance_prompt(self, # type: HummingbotApplication\n config_map,\n input_value = None):\n if input_value:\n config_map['inventory_target_base_pct'].value = Decimal(input_value)\n else:\n exchange = config_map['exchange'].value\n market = config_map[\"market\"].value\n base, quote = market.split(\"-\")\n balances = await UserBalances.instance().balances(exchange, base, quote)\n if balances is None:\n return\n base_ratio = UserBalances.base_amount_ratio(exchange, market, balances)\n if base_ratio is 
None:\n return\n base_ratio = round(base_ratio, 3)\n quote_ratio = 1 - base_ratio\n base, quote = config_map[\"market\"].value.split(\"-\")\n\n cvar = ConfigVar(key=\"temp_config\",\n prompt=f\"On {exchange}, you have {balances.get(base, 0):.4f} {base} and \"\n f\"{balances.get(quote, 0):.4f} {quote}. By market value, \"\n f\"your current inventory split is {base_ratio:.1%} {base} \"\n f\"and {quote_ratio:.1%} {quote}.\"\n f\" Would you like to keep this ratio? (Yes/No) >>> \",\n required_if=lambda: True,\n type_str=\"bool\",\n validator=validate_bool)\n await self.prompt_a_config(cvar)\n if cvar.value:\n config_map['inventory_target_base_pct'].value = round(base_ratio * Decimal('100'), 1)\n else:\n if self.app.to_stop_config:\n self.app.to_stop_config = False\n return\n await self.prompt_a_config(config_map[\"inventory_target_base_pct\"])\n" ]
[ [ "pandas.DataFrame" ] ]
samkreter/kmeans-clustering-with-spatial-bias
[ "17d47c564074f8d789b7f5370ea0acc56c19f529" ]
[ "getDataSet.py" ]
[ "import scipy.io\nimport numpy as np\n\n#convert main dataset\n# mat = scipy.io.loadmat(\"Indian_pines.mat\")\n# npMat = np.array(mat['indian_pines'])\n\n# np.save(\"npIndian_pines.npy\",npMat)\n\n#convert ground truth data set\nmat = scipy.io.loadmat(\"Indian_pines_gt.mat\")\nnpMat = np.array(mat['indian_pines_gt'])\n\nnp.save(\"npIndian_pines_gt.npy\",npMat)" ]
[ [ "numpy.array", "numpy.save" ] ]
alanbseo/deepgreen
[ "b8a19c83d75f275c5e58bc7a48beb22ce61a81d9" ]
[ "PythonScripts/Flickr_BatchTagging_EU_keal.py" ]
[ "import os\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom keras.applications import inception_resnet_v2\n\nfrom keras.preprocessing import image\n\nimg_width, img_height = 331, 331\n\n\nimport fnmatch\n\nfrom shutil import copyfile\n\nimport PIL\nfrom PIL import ImageFile\n\nImageFile.LOAD_TRUNCATED_IMAGES = True # read broken images\n\n\n\n# copy jpg files\ntoCopyFile = False\n\n \nmodelname = \"InceptionResnetV2\"\n\n\n# EU\ndataname = \"EU\"\n\n# KEAL\ndefault_path = '/pd/data/crafty/deepGreen'\nphoto_path_base = \"/pd/data/crafty/FlickrEU_DOWNLOAD_14May2018/May2018_V1_Photo/\"\nout_path_base = \"/pd/data/crafty/FlickrEU_result/Tagging_EU2018_v3/\"\n\n# photo_path_base = \"/pd/data/crafty/FlickrEU_DOWNLOAD_11Jan2019/Jan2019_V1_Photos/\"\n# out_path_base = \"/pd/data/crafty/FlickrEU_result/Tagging_EU2019_v3/\"\n\n# Linux\n# default_path = '/home/alan/Dropbox/KIT/FlickrEU/deepGreen'\n# photo_path_base = \"/home/alan/Dropbox/KIT/FlickrEU/FlickrEU_download/SamplePhotos/\"\n# # photo_path_base = \"/DATA10TB/FlickrEU_download/Bayern/Flickr_Aug2018_V2_Photo_Bayern/\"\n# out_path_base = \"/home/alan/Dropbox/KIT/FlickrEU/LabelledData/Test/\"\n\n\n\nos.chdir(default_path)\n\nout_path = out_path_base + modelname + \"/\" + dataname + \"/\"\n\n# number of images for one batch prediction\nprediction_batch_size = 1024\n\ntop = 10 # print top-n classes\n\nimg_width = img_height = 299\nmodel_trained = inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None,\n input_shape=(img_width, img_height, 3))\n\n# Imagenet class labels\nimagenet_labels_filename = \"Data/imagenet_class_index.json\"\nwith open(imagenet_labels_filename) as f:\n CLASS_INDEX = json.load(f)\n#\nclasses = []\nfor i in range(CLASS_INDEX.__len__()):\n classes.append(CLASS_INDEX[str(i)][1])\n\nclasses_arr = np.array(classes)\n\nnum_classes = len(classes)\n\n##### Predict\n\n\n\n# list only folder names\nfoldernames = [d for d in os.listdir(photo_path_base) if os.path.isdir(os.path.join(photo_path_base, d))]\n\nf_idx = 1\n\nfor f_idx in (range(10000, len(foldernames))):\n# for f_idx in (range(0, 1)):\n\n foldername = foldernames[f_idx]\n print(\"folder idx:\" + str(f_idx))\n print(foldername)\n photo_path_aoi = os.path.join(photo_path_base, foldername)\n\n for (root, subdirs, files) in os.walk(photo_path_aoi):\n\n if len(subdirs) == 0:\n continue # skip if it does not have a subdir\n print('--\\nroot = ' + root)\n\n # csv output file\n name_csv = out_path + \"Result/\" + \"/CSV/\" + os.path.relpath(root, photo_path_base) + \".csv\"\n if os.path.exists(name_csv):\n print(\"skips as it is done already\")\n continue # skip the folder if there is already the output csv file\n\n\n ### Read filenames\n\n filenames_raw = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(photo_path_aoi)) for f in fn]\n # print(filenames_raw)\n\n filenames1 = fnmatch.filter(filenames_raw, \"*.jpg\")\n filenames2 = fnmatch.filter(filenames_raw, \"*.JPG\")\n\n filenames = filenames1 + filenames2\n\n n_files = len(filenames)\n\n # print(filenames)\n\n\n def foo_get_year(x):\n return (os.path.basename(os.path.dirname(x)))\n\n years = list(map(foo_get_year, filenames))\n\n\n if n_files == 0:\n print(\"skips as there is no image\")\n continue # skip the folder if there is no image\n\n\n # base filenames\n base_filenames = list(map(os.path.basename, filenames))\n\n prediction_steps_per_epoch = int(np.ceil(n_files / prediction_batch_size))\n\n # load all images into a list\n batch_size_folder = 
min(n_files, prediction_batch_size) # n_files can be smaller than the batch size\n\n for step_start_idx in range(0, n_files, batch_size_folder):\n\n end_idx = min(step_start_idx + batch_size_folder, n_files)\n\n print(step_start_idx)\n print(end_idx)\n\n if step_start_idx == end_idx:\n\n filenames_batch = [filenames[step_start_idx]]\n else:\n\n filenames_batch = filenames[step_start_idx:end_idx]\n\n bsize_tmp = min(batch_size_folder, len(filenames_batch)) # for the last batch\n\n images = []\n\n images_broken_idx = np.empty(bsize_tmp, dtype=bool)\n images_broken_idx[:] = False\n\n\n for f_idx, fname in enumerate(filenames_batch):\n # print(f_idx, fname)\n\n # print(img_name)\n img_name = os.path.join(photo_path_aoi, root, fname)\n\n # load an image in PIL format\n try:\n img = image.load_img(img_name, target_size=(img_width, img_height))\n except:\n print(\"skips as it is broken\")\n print(f_idx, fname)\n images_broken_idx[f_idx] = True\n img = PIL.Image.new(mode=\"RGB\", size=(img_width, img_height))\n\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n # prepare the image (normalisation for channels)\n img_preprocessed = inception_resnet_v2.preprocess_input(img.copy())\n images.append(img_preprocessed)\n\n # vstack for batch tagging\n images_vstack = np.vstack(images)\n\n # stack up images list to pass for prediction\n predictions = model_trained.predict(images_vstack, batch_size=bsize_tmp)\n\n # predictions.shape\n\n ## top selected classes\n top_classes_idx_arr = np.argsort(predictions)[:, ::-1][:, :top]\n\n top_classes_arr = classes_arr[top_classes_idx_arr]\n # print(top_classes_arr)\n\n # create an empty array\n top_classes_probs_arr = np.empty([bsize_tmp, top])\n top_classes_probs_arr[:] = 0\n\n for i in range(0, bsize_tmp):\n top_classes_probs_arr[i,] = predictions[i, [top_classes_idx_arr[i,]]]\n\n # np.argsort(predictions)[:, ::-1][:,:top][0, :]\n\n # chainlink_fence', 'worm_fence', 'lakeside', 'seashore', 'stone_wall', 'cliff', 'breakwater']\n # Out[61]: array([489, 912, 975, 978, 825, 972, 460])\n top_classes_arr[0, :]\n top_classes_probs_arr[0, :]\n\n predicted_class_v = top_classes_arr[:, 0] # top1\n #predicted_class_top2_v = top_classes_arr[:, 1] # top2\n\n\n # print('Predicted:', predicted_class_v)\n\n # 2nd-level\n # kind of equivalent to `sapply()' in R\n # def foo_get_predicted_filename(x):\n # return (out_path + \"Result/\" + modelname + \"/ClassifiedPhotos/\" + os.path.relpath(root,\n # photo_path_base) + \"/\" + x)\n\n\n # predicted_filenames = list(map(foo_get_predicted_filename, predicted_class_v))\n\n top_classes_arr[images_broken_idx,] = \"\"\n top_classes_probs_arr[images_broken_idx,]= 0\n\n arr_tmp = pd.DataFrame(np.concatenate((top_classes_arr, top_classes_probs_arr), axis=1))\n\n if step_start_idx == 0:\n arr_aoi = arr_tmp\n else:\n arr_aoi = np.concatenate((arr_aoi, arr_tmp), axis=0)\n\n\n # save_folder_names = list(map(os.path.basename, predicted_filenames))\n\n # create necessary folders\n # for i in range(0, n_files):\n # if not (os.path.exists(save_folder_names[i])):\n # os.makedirs(save_folder_names[i], exist_ok=False)\n # if (toCopyFile):\n # for i in range(0, bsize_tmp):\n #\n # save_folder = predicted_filenames[i]\n # print(save_folder)\n #\n # if not (os.path.exists(save_folder)):\n # os.makedirs(save_folder, exist_ok=False)\n # copyfile(filenames_batch[i], predicted_filenames[i] + '/' + os.path.basename(filenames_batch[i]))\n\n # Write csv files\n if not (os.path.exists(os.path.dirname(name_csv))):\n 
os.makedirs(os.path.dirname(name_csv), exist_ok=True)\n\n # Write a Pandas data frame\n df_aoi = pd.concat([pd.DataFrame(base_filenames), pd.DataFrame(years), pd.DataFrame(arr_aoi)], axis=1)\n header = np.concatenate(\n ([\"Filename\"], [\"Year\"],[\"Top1\", \"Top2\", \"Top3\", \"Top4\", \"Top5\", \"Top6\", \"Top7\", \"Top8\", \"Top9\", \"Top10\"],\n [\"Prob1\", \"Prob2\", \"Prob3\", \"Prob4\", \"Prob5\", \"Prob6\", \"Prob7\", \"Prob8\", \"Prob9\", \"Prob10\"]))\n\n df_aoi.columns = header\n df_aoi.to_csv(name_csv, index=False, columns=header)\n\n # @todo attention map\n" ]
[ [ "numpy.expand_dims", "numpy.empty", "pandas.DataFrame", "numpy.concatenate", "numpy.ceil", "numpy.argsort", "numpy.array", "numpy.vstack" ] ]
brettkoonce/fairscale
[ "05ce7971d256893a7707a8a99e89ec3ef75ab7c0" ]
[ "tests/nn/model_parallel/test_layers.py" ]
[ "# coding=utf-8\n\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\n\nimport pytest\nimport torch\nfrom torch import nn\nfrom torch.distributed import rpc\nimport torch.nn.init as init\nfrom torch.nn.parameter import Parameter\n\nfrom fairscale.nn.model_parallel import initialize as mpu\nfrom fairscale.nn.model_parallel import layers\nfrom fairscale.nn.pipe import MultiProcessPipe\nfrom fairscale.utils.testing import dist_init, get_world_sizes, set_random_seed, spawn_for_all_world_sizes, torch_spawn\n\n\ndef run_test_parallel_embedding(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n if torch.distributed.get_rank() == 0:\n print(\"> testing parallel embedding with model parallel size {} ...\".format(model_parallel_size))\n\n mpu.initialize_model_parallel(model_parallel_size)\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n batch_size = 17\n seq_length = 23\n vocab_size = 48\n hidden_size = 16\n seed = 1236\n\n set_random_seed(123)\n input_data = torch.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size).cuda()\n loss_weight = torch.randn([batch_size, seq_length, hidden_size]).cuda()\n\n set_random_seed(seed)\n embedding_original = torch.nn.Embedding(vocab_size, hidden_size).cuda()\n\n output = embedding_original(input_data)\n loss_original = torch.mul(output, loss_weight).sum()\n loss_original.backward()\n\n set_random_seed(seed)\n embedding_parallel = layers.ParallelEmbedding(vocab_size, hidden_size, init_method=init.normal_).cuda()\n output = embedding_parallel(input_data)\n loss_parallel = torch.mul(output, loss_weight).sum()\n loss_parallel.backward()\n\n set_random_seed(seed)\n embedding_vocab_parallel = layers.VocabParallelEmbedding(vocab_size, hidden_size, init_method=init.normal_).cuda()\n output = embedding_vocab_parallel(input_data)\n loss_vocab_parallel = torch.mul(output, loss_weight).sum()\n loss_vocab_parallel.backward()\n\n torch.distributed.barrier()\n error = loss_parallel.sub(loss_original).abs()\n print(\" error in loss (parallel) on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-12, \"error: {}\".format(error)\n\n torch.distributed.barrier()\n error = loss_vocab_parallel.sub(loss_original).abs()\n print(\" error in loss (vocab parallel) on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-12, \"error: {}\".format(error)\n\n weight_grad_orig = torch.split(embedding_original.weight.grad, hidden_size // model_parallel_size, 1)[\n mpu.get_model_parallel_rank()\n ]\n error = embedding_parallel.weight.grad.sub(weight_grad_orig).abs().max()\n print(\" error in grad (parallel) on global rank {}: 
{}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-12, \"error: {}\".format(error)\n\n weight_grad_orig = torch.split(embedding_original.weight.grad, vocab_size // model_parallel_size, 0)[\n mpu.get_model_parallel_rank()\n ]\n error = embedding_vocab_parallel.weight.grad.sub(weight_grad_orig).abs().max()\n print(\" error in grad (vocab parallel) on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-12, \"error: {}\".format(error)\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\">> passed the test :-)\")\n\n\ndef run_test_initialize_affine_weight(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n mpu.initialize_model_parallel(model_parallel_size)\n if torch.distributed.get_rank() == 0:\n print(\"> testing initialize_affine_weight with model parallel size: {}\".format(model_parallel_size))\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n seed = 12345\n input_size_coeff = 13\n input_size = input_size_coeff * model_parallel_size\n output_size_coeff = 17\n output_size = output_size_coeff * model_parallel_size\n\n # ---------------\n # Column parallel\n # ---------------\n weight = torch.empty(output_size_coeff, input_size)\n set_random_seed(seed)\n layers._initialize_affine_weight(weight, output_size, input_size, output_size_coeff, 0, torch.nn.init.normal_)\n # Target.\n set_random_seed(seed)\n master_weight = torch.empty(output_size, input_size)\n torch.nn.init.normal_(master_weight)\n rank = mpu.get_model_parallel_rank()\n my_weight = torch.split(master_weight, output_size_coeff, dim=0)[rank].contiguous().clone()\n\n # Compare.\n error = weight.sub(my_weight).abs().max()\n torch.distributed.barrier()\n print(\n \" column parallel max error (should be zero) on global rank {}: {}\".format(\n torch.distributed.get_rank(), error\n )\n )\n assert error < 1.0e-6\n\n # ------------\n # Row parallel\n # ------------\n weight = torch.empty(output_size, input_size_coeff)\n set_random_seed(seed)\n layers._initialize_affine_weight(weight, output_size, input_size, input_size_coeff, 1, torch.nn.init.normal_)\n # Target.\n set_random_seed(seed)\n master_weight = torch.empty(output_size, input_size)\n torch.nn.init.normal_(master_weight)\n rank = mpu.get_model_parallel_rank()\n my_weight = torch.split(master_weight, input_size_coeff, dim=1)[rank].contiguous().clone()\n\n # Compare.\n error = weight.sub(my_weight).abs().max()\n torch.distributed.barrier()\n print(\n \" row parallel max error (should be zero) on global rank {}: {}\".format(torch.distributed.get_rank(), error)\n )\n assert error < 1.0e-6\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\" >> passed the test :-)\")\n\n\nclass IdentityLayer2D(torch.nn.Module):\n def __init__(self, m, n):\n super(IdentityLayer2D, self).__init__()\n self.weight = Parameter(torch.Tensor(m, n))\n torch.nn.init.xavier_normal_(self.weight)\n\n def forward(self):\n return self.weight\n\n\ndef run_test_column_parallel_linear(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n mpu.initialize_model_parallel(model_parallel_size)\n if torch.distributed.get_rank() == 0:\n print(\"> testing ColumnParallelLinear with model parallel size: {}\".format(model_parallel_size))\n model_parallel_size = 
mpu.get_model_parallel_world_size()\n\n seed = 12345\n set_random_seed(seed)\n input_size_coeff = 13\n input_size = input_size_coeff * model_parallel_size\n output_size_coeff = 17\n output_size = output_size_coeff * model_parallel_size\n batch_size = 7\n\n # Network\n identity_layer = IdentityLayer2D(batch_size, input_size).cuda()\n linear_layer = layers.ColumnParallelLinear(input_size, output_size, keep_master_weight_for_test=True).cuda()\n loss_weight = torch.randn([batch_size, output_size]).cuda()\n # Forward\n input_ = identity_layer()\n output = linear_layer(input_)\n loss = torch.mul(output, loss_weight).sum()\n # Backward\n loss.backward()\n\n # Values.\n dLdY = loss_weight\n X = identity_layer.weight\n A = linear_layer.master_weight.cuda()\n dLdA = torch.matmul(dLdY.t(), X)\n dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)\n dLdX = torch.matmul(dLdY, A)\n\n rank = mpu.get_model_parallel_rank()\n my_dLdA = torch.split(dLdA, output_size_coeff, dim=0)[rank].contiguous().clone()\n error = my_dLdA.sub(linear_layer.weight.grad).abs().max()\n torch.distributed.barrier()\n print(\" error in dLdA on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n my_dLdb = torch.split(dLdb, output_size_coeff, dim=0)[rank].contiguous().clone()\n error = my_dLdb.sub(linear_layer.bias.grad).abs().max()\n torch.distributed.barrier()\n print(\" error in dLdb on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n error = dLdX.sub(identity_layer.weight.grad).abs().max()\n torch.distributed.barrier()\n print(\" error in dLdX on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\" >> passed the test :-)\")\n\n\ndef run_test_row_parallel_linear(rank, model_parallel_size, filename, filename_rpc):\n dist_init(rank, model_parallel_size, filename, filename_rpc)\n\n mpu.initialize_model_parallel(model_parallel_size)\n if torch.distributed.get_rank() == 0:\n print(\"> testing RowParallelLinear with model parallel size: {}\".format(model_parallel_size))\n model_parallel_size = mpu.get_model_parallel_world_size()\n\n seed = 12345\n set_random_seed(seed)\n input_size_coeff = 13\n input_size = input_size_coeff * model_parallel_size\n output_size_coeff = 17\n output_size = output_size_coeff * model_parallel_size\n batch_size = 7\n\n # Network\n identity_layer = IdentityLayer2D(batch_size, input_size).cuda()\n linear_layer = layers.RowParallelLinear(input_size, output_size, keep_master_weight_for_test=True).cuda()\n loss_weight = torch.randn([batch_size, output_size]).cuda()\n # Forward\n input_ = identity_layer()\n output = linear_layer(input_)\n loss = torch.mul(output, loss_weight).sum()\n # Backward\n loss.backward()\n\n # Values.\n dLdY = loss_weight\n X = identity_layer.weight\n A = linear_layer.master_weight.cuda()\n dLdA = torch.matmul(dLdY.t(), X)\n dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)\n dLdX = torch.matmul(dLdY, A)\n\n rank = mpu.get_model_parallel_rank()\n my_dLdA = torch.split(dLdA, input_size_coeff, dim=1)[rank].contiguous().clone()\n error = my_dLdA.sub(linear_layer.weight.grad).abs().max()\n torch.distributed.barrier()\n print(\" error in dLdA on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n error = dLdb.sub(linear_layer.bias.grad).abs().max()\n 
torch.distributed.barrier()\n print(\" error in dLdb on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n error = dLdX.sub(identity_layer.weight.grad).abs().max()\n torch.distributed.barrier()\n print(\" error in dLdX on global rank {}: {}\".format(torch.distributed.get_rank(), error))\n assert error < 1.0e-6\n\n # Reset groups\n mpu.destroy_model_parallel()\n\n torch.distributed.barrier()\n if torch.distributed.get_rank() == 0:\n print(\" >> passed the test :-)\")\n\n\ndef run_test_pipe(rank, world_size, filename, filename_rpc, skip_dist_init=False):\n pipe_world_size = 2\n\n if world_size == 1:\n return\n\n if not skip_dist_init:\n dist_init(rank, world_size, filename, filename_rpc)\n else:\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"29502\"\n rpc.init_rpc(f\"Test{rank}\", rank=rank, world_size=world_size)\n\n mpu.initialize_model_parallel(world_size / pipe_world_size, pipe_world_size)\n model_parallel_size = mpu.get_model_parallel_world_size()\n if torch.distributed.get_rank() == 0:\n print(\n \"> testing Sequential + MultiProcessPipe with model parallel size: {}, pipe: {}\".format(\n model_parallel_size, pipe_world_size\n )\n )\n chunk_size = 4\n\n seed = 12345\n set_random_seed(seed)\n input_size_coeff = 3\n input_size = input_size_coeff * model_parallel_size\n output_size_coeff = 7\n output_size = output_size_coeff * model_parallel_size\n batch_size = 3 * chunk_size\n\n target = torch.rand((batch_size, input_size), requires_grad=True).cuda()\n print(f\"target = {target}\")\n\n identity = IdentityLayer2D(batch_size, input_size).cuda()\n\n pipeline_devices = mpu.get_pipeline_parallel_group()\n\n set_random_seed(seed)\n model = nn.Sequential(\n layers.ColumnParallelLinear(input_size, output_size, keep_master_weight_for_test=True, bias=False).cuda(),\n nn.ReLU(),\n layers.RowParallelLinear(output_size, input_size, keep_master_weight_for_test=True, bias=False).cuda(),\n )\n set_random_seed(seed)\n\n reference = [\n nn.Linear(input_size, output_size, bias=False).cuda(),\n nn.ReLU(),\n nn.Linear(output_size, input_size, bias=False).cuda(),\n ]\n\n print(f\"setup {reference[0].weight.size()}, {model[0].weight.size()}, {(input_size, output_size)}\")\n print(f\"setup {reference[2].weight.size()}, {(output_size, input_size)}\")\n\n reference[0].weight = Parameter(model[0].get_master_weight().clone()).cuda()\n reference[2].weight = Parameter(model[2].get_master_weight().clone()).cuda()\n\n reference = nn.Sequential(*reference)\n\n def grad_graph(depth, grad):\n result = depth * \" \" + str(grad)\n if grad:\n for x in grad.next_functions:\n result += \"\\n\" + grad_graph(depth + 1, x[0])\n return result\n\n def check_weights(x, y, key: str, index=None):\n for i in [2, 0]:\n if index is not None and i != index:\n continue\n left = x[i].get_master_weight()\n right = y[i].weight.data\n if not torch.allclose(left, right, atol=1.0e-6) or index is not None:\n print(f\"check_weights {key}-{i}: left = {left}, \\nright = {right}\")\n if not torch.equal(left, right):\n print(f\"check_weights NOT_EQUAL {key}-{i}: left = {left}, \\nright = {right}\")\n assert torch.allclose(left, right, atol=1.0e-6)\n\n def dump_opt_params(opt):\n for i, group in enumerate(opt.param_groups):\n for j, p in enumerate(group[\"params\"]):\n print(f\"{torch.distributed.get_rank()}:param {(i,j)} = {p}\")\n print(f\"{torch.distributed.get_rank()}:param.grad {(i,j)} = {p.grad}\")\n\n def forward_model(model_, target, step=False):\n optimizer = 
torch.optim.SGD(model_.parameters(), lr=0.01, momentum=0.9)\n optimizer.zero_grad()\n model_.zero_grad()\n output = model_(identity())\n loss = nn.MSELoss()\n model_.zero_grad()\n if step:\n loss(output, target).backward()\n saved_weight_0 = model_[0].weight.data.clone()\n saved_weight_2 = model_[2].weight.data.clone()\n dump_opt_params(optimizer)\n optimizer.step()\n assert not torch.allclose(saved_weight_0, model_[0].weight.data, atol=1.0e-6)\n assert not torch.allclose(saved_weight_2, model_[2].weight.data, atol=1.0e-6)\n return output\n\n output = forward_model(model, target)\n reference_output = forward_model(reference, target)\n\n error = reference_output.sub(output).max()\n torch.distributed.barrier()\n assert error < 1.0e-6\n\n output = forward_model(model, target)\n error = reference_output.sub(output).max()\n torch.distributed.barrier()\n assert error < 1.0e-6\n\n output = forward_model(model, target)\n error = reference_output.sub(output).max()\n torch.distributed.barrier()\n assert error < 1.0e-6\n\n check_weights(model, reference, \"before\")\n saved_weight_0 = model[0].weight.data.clone()\n saved_weight_2 = model[2].weight.data.clone()\n output = forward_model(model, target, step=True)\n error = reference_output.sub(output).max()\n assert error < 1.0e-6\n model[0].weight.data = saved_weight_0\n model[2].weight.data = saved_weight_2\n\n worker_map = {i: f\"Test{i}\" for i in range(torch.distributed.get_world_size())}\n\n if pipe_world_size == 2:\n print(\"actually doing pipe stuff now\")\n assert torch.equal(saved_weight_0, model[0].weight.data)\n assert torch.equal(saved_weight_2, model[2].weight.data)\n pipe_model = MultiProcessPipe(\n model,\n [2, 1],\n group=pipeline_devices,\n worker_map=worker_map,\n input_device=torch.cuda.current_device(),\n chunks=chunk_size,\n ).cuda()\n torch.distributed.barrier()\n pipe_rank = torch.distributed.get_rank(group=mpu.get_pipeline_parallel_group())\n print(f\"pipe rank is {pipe_rank}\")\n if pipe_rank == 0:\n assert torch.equal(saved_weight_0, pipe_model[0].weight.data)\n else:\n if not torch.equal(saved_weight_2, pipe_model[0].weight.data):\n print(f\"ne {pipe_rank}: left\\n{saved_weight_2}\\nright:\\n{pipe_model[0].weight.data}\")\n assert torch.equal(saved_weight_2, pipe_model[0].weight.data)\n optimizer = torch.optim.SGD(pipe_model.parameters(), lr=0.01, momentum=0.9)\n optimizer.zero_grad()\n if pipe_rank == 0:\n assert torch.equal(saved_weight_0, pipe_model[0].weight.data)\n print(f\"runner {rank}:\\n{pipe_model[0].weight.data}\")\n else:\n assert torch.equal(saved_weight_2, pipe_model[0].weight.data)\n print(f\"runner {rank}:\\n{pipe_model[0].weight.data}\")\n\n if torch.distributed.get_rank(mpu.get_pipeline_parallel_group()) == 1:\n check_weights(model, reference, \"pre-pipe\", index=2)\n else:\n check_weights(model, reference, \"pre-pipe\", index=0)\n\n pipe_output = pipe_model(identity())\n print(f\"exited pipe for {rank}\")\n forward_model(reference, target, step=True)\n\n print(f\"pipe_output {rank} = {pipe_output}\")\n print(f\"reference_output {rank} = {reference_output}\")\n\n torch.distributed.barrier()\n\n if torch.distributed.get_rank(mpu.get_pipeline_parallel_group()) == 1:\n error = reference_output.sub(pipe_output.cuda()).max()\n if error >= 1.0e-6:\n print(f\"error bad {error}\")\n assert error < 1.0e-6\n\n loss = nn.MSELoss()\n failed = False\n pipe_output.retain_grad()\n with torch.autograd.profiler.profile() as prof:\n try:\n loss(pipe_output, target).backward()\n except Exception as e:\n failed = True\n 
print(f\"got {e} while doing backward, deadlock?\")\n if failed:\n raise RuntimeError(\"failed somehow\")\n dump_opt_params(optimizer)\n optimizer.step()\n\n print(\"calling check_weights on master\")\n check_weights(model, reference, \"pipe\", index=2)\n print(f\"waiting for barrier on master, pid={os.getpid()}\")\n else:\n print(f\"calling backwards on slave, pid={os.getpid()}\")\n failed = False\n with torch.autograd.profiler.profile() as prof:\n try:\n pipe_model.back_helper(pipe_output)\n except Exception as e:\n failed = True\n print(f\"got {e} while doing backward, deadlock?\")\n if failed:\n raise RuntimeError(\"failed somehow\")\n dump_opt_params(optimizer)\n print(\"calling step on slave\")\n optimizer.step()\n print(\"calling check_weights on slave\")\n check_weights(model, reference, \"pipe\", index=0)\n print(\"waiting for barrier on slave\")\n\n pipe_model.zero_grad()\n torch.distributed.barrier()\n\n pipe_model.eval()\n pipe_output = pipe_model(identity())\n updated_ref_output = forward_model(reference, target)\n if torch.distributed.get_rank(mpu.get_pipeline_parallel_group()) == 1:\n error = updated_ref_output.sub(pipe_output.cuda()).max()\n print(f\"outputs are ref:\\n{updated_ref_output}\\npipe:\\n{pipe_output}\")\n assert error < 1.0e-6\n torch.distributed.barrier()\n\n print(f\"finished waiting for barrier on, pid={os.getpid()}\")\n\n print(f\"really exited pipe for {rank}\")\n\n rpc.shutdown()\n torch.distributed.destroy_process_group()\n\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\ndef test_affine_weight():\n spawn_for_all_world_sizes(run_test_initialize_affine_weight)\n\n\ndef test_embedding():\n spawn_for_all_world_sizes(run_test_parallel_embedding)\n\n\ndef test_column_parallel():\n spawn_for_all_world_sizes(run_test_column_parallel_linear)\n\n\[email protected](\"OMPI_COMM_WORLD_RANK\" not in os.environ, reason=\"only works on mpi\")\ndef test_row_parallel():\n spawn_for_all_world_sizes(run_test_row_parallel_linear)\n\n\n@torch_spawn([2])\[email protected](\"OMPI_COMM_WORLD_RANK\" not in os.environ, reason=\"only works on mpi\")\[email protected](not torch.cuda.is_available(), reason=\"cuda required\")\ndef mpi_pipe():\n mpu.destroy_model_parallel()\n _, tempfile_init = tempfile.mkstemp()\n _, tempfile_rpc_init = tempfile.mkstemp()\n\n run_test_pipe(\n torch.distributed.get_rank(),\n torch.distributed.get_world_size(),\n tempfile_init,\n tempfile_rpc_init,\n skip_dist_init=True,\n )\n\n\[email protected](not torch.cuda.is_available(), reason=\"cuda required\")\ndef test_pipe_layer():\n world_sizes = [x for x in get_world_sizes() if x <= torch.cuda.device_count() / 2]\n\n spawn_for_all_world_sizes(run_test_pipe, args=[False])\n\n\[email protected](not torch.cuda.is_available(), reason=\"cuda required\")\[email protected](reason=\"potential deadlock in nccl with multiple processes using the same gpu\")\ndef test_eight_pipe_layer():\n world_sizes = [x for x in get_world_sizes() if x <= torch.cuda.device_count() / 2]\n\n spawn_for_all_world_sizes(run_test_pipe, [8])\n" ]
[ [ "torch.nn.Embedding", "torch.cuda.is_available", "torch.split", "torch.distributed.get_rank", "torch.allclose", "torch.ones", "torch.randn", "torch.distributed.barrier", "torch.equal", "torch.mul", "torch.rand", "torch.nn.Sequential", "torch.LongTensor", "torch.empty", "torch.cuda.current_device", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "torch.nn.init.normal_", "torch.distributed.destroy_process_group", "torch.distributed.rpc.shutdown", "torch.distributed.rpc.init_rpc", "torch.distributed.get_world_size", "torch.cuda.device_count", "torch.Tensor", "torch.matmul", "torch.nn.ReLU", "torch.autograd.profiler.profile", "torch.nn.MSELoss" ] ]
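The test file above verifies parallel layers by running them against a plain nn.Linear reference with identical weights and asserting a max-abs-error below 1e-6. A minimal single-process sketch of that same comparison idiom, assuming nothing beyond stock PyTorch (no fairscale, no process groups, no CUDA):

```python
# Single-process analogue of the reference-comparison pattern used in the
# distributed tests: two layers with identical weights, identical inputs,
# and an allclose-style bound on the gradient mismatch.
import torch
import torch.nn as nn

torch.manual_seed(12345)
batch, in_features, out_features = 4, 6, 3

reference = nn.Linear(in_features, out_features, bias=False)
candidate = nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    candidate.weight.copy_(reference.weight)  # share the initial weights

x_ref = torch.randn(batch, in_features, requires_grad=True)
x_cand = x_ref.detach().clone().requires_grad_(True)

reference(x_ref).sum().backward()
candidate(x_cand).sum().backward()

# Same max-abs-error check and the same 1.0e-6 tolerance as the tests above.
error = x_ref.grad.sub(x_cand.grad).abs().max()
assert error < 1.0e-6, f"error in dLdX: {error}"
print(" >> passed the test :-)")
```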
neilshah13/capstone21
[ "1be9175d70041cb3ee429f31dd51dd11c7ab39af" ]
[ "python_backend/triton_client/tao_triton/python/postprocessing/trafficcamnet_processor.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n# \n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n# \n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Simple class to run post processing of Detectnet-v2 Triton Inference outputs.\"\"\"\n\nimport os\n\nimport numpy as np\nfrom sklearn.cluster import DBSCAN as dbscan\nfrom google.protobuf.text_format import Merge as merge_text_proto\n\nfrom tao_triton.python.postprocessing.postprocessor import Postprocessor\nimport tao_triton.python.proto.postprocessor_config_pb2 as postprocessor_config_pb2\nfrom tao_triton.python.types import KittiBbox\nfrom tao_triton.python.postprocessing.utils import (\n denormalize_bounding_bboxes,\n iou_vectorized,\n pool_context,\n render_image,\n thresholded_indices,\n return_bbox_info\n)\nfrom tao_triton.python.utils.kitti import write_kitti_annotation\nfrom PIL import Image\n\ndef load_clustering_config(config):\n \"\"\"Load the clustering config.\"\"\"\n proto = postprocessor_config_pb2.PostprocessingConfig()\n def _load_from_file(filename, pb2):\n if not os.path.exists(filename):\n raise IOError(\"Specfile not found at: {}\".format(filename))\n with open(filename, \"r\") as f:\n merge_text_proto(f.read(), pb2)\n _load_from_file(config, proto)\n return proto\n \n\nclass TrafficCamNetPostprocessor(Postprocessor):\n \"\"\"Post processor for Triton outputs from TrafficCamNet.\"\"\"\n\n def __init__(self, batch_size, frames,\n output_path, data_format, classes,\n postprocessing_config, target_shape):\n \"\"\"Initialize a post processor class for a classification model.\n \n Args:\n batch_size (int): Number of images in the batch.\n frames (list): List of images.\n output_path (str): Unix path to the output rendered images and labels.\n data_format (str): Order of the input model dimensions.\n \"channels_first\": CHW order.\n \"channels_last\": HWC order.\n classes (list): List of the class names.\n postprocessing_config (proto): Configuration elements of the dbscan postprocessor.\n target_shape (tuple): Shape of the model input.\n \"\"\"\n self.pproc_config = load_clustering_config(postprocessing_config)\n self.classes = classes\n self.output_names = [\"output_cov/Sigmoid\",\n \"output_bbox/BiasAdd\"]\n self.bbox_norm = [35., 35]\n self.offset = 0.5\n self.scale_h = 1\n self.scale_w = 1\n self.target_shape = target_shape\n self.stride = self.pproc_config.stride\n super().__init__(batch_size, frames, output_path, data_format)\n # Format the dbscan elements into classwise configurations for rendering.\n self.configure()\n\n def 
configure(self):\n \"\"\"Configure the post processor object.\"\"\"\n self.dbscan_elements = {}\n self.coverage_thresholds = {}\n self.box_color = {}\n classwise_clustering_config = self.pproc_config.classwise_clustering_config\n for class_name in self.classes:\n if class_name not in classwise_clustering_config.keys():\n raise KeyError(\"Cannot find class name {} in {}\".format(\n class_name, self.pproc_config.keys()\n ))\n self.dbscan_elements[class_name] = dbscan(\n eps=classwise_clustering_config[class_name].dbscan_config.dbscan_eps,\n min_samples=classwise_clustering_config[class_name].dbscan_config.dbscan_min_samples,\n )\n self.coverage_thresholds[class_name] = classwise_clustering_config[class_name].coverage_threshold\n self.box_color[class_name] = classwise_clustering_config[class_name].bbox_color\n\n def apply(self, results, this_id, render=True):\n \"\"\"Apply the post processing to the outputs tensors.\n \n This function takes the raw output tensors from the detectnet_v2 model\n and performs the following steps:\n\n 1. Denormalize the output bbox coordinates which converts bbox from relative coordinates to absolute coordinates.\n 2. Threshold the coverage output to get the valid indices for the bboxes based on a coverage threshold. This coverage output is attained from the \"output_cov/Sigmoid returns from the model inference.\n 3. Cluster the filterred boxes using DBSCAN. This utilises the IOU between possible predicted rectangles and clusters them to output the best bbox.\n 4. Converts filtered boxes into KittiBbox output format with the final absolute coordinates of bbox and confidence scores\n\n # 1. Denormalize the output bbox coordinates which converts bbox from relative coordinates to absolute coordinates.\n # 2. Threshold the coverage output to get the valid indices for the bboxes based on a pre set coverage threshold.\n # 3. Filter out the bboxes from the \"output_bbox/BiasAdd\" blob.\n # 4. Cluster the filterred boxes using DBSCAN.\n # 5. Converts filtered boxes into KittiBbox output format with the final absolute coordinates of bbox and confidence scores\n # 6. 
Serialize the output bboxes to KITTI Format label files in output_path/labels.\n \"\"\"\n\n output_array = {}\n this_id = int(this_id)\n for output_name in self.output_names:\n output_array[output_name] = results.as_numpy(output_name).transpose(0, 1, 3, 2)\n assert len(self.classes) == output_array[\"output_cov/Sigmoid\"].shape[1], (\n \"Number of classes {} != number of dimensions in the output_cov/Sigmoid: {}\".format(\n len(self.classes), output_array[\"output_cov/Sigmoid\"].shape[1]\n )\n )\n abs_bbox = denormalize_bounding_bboxes(\n output_array[\"output_bbox/BiasAdd\"], self.stride,\n self.offset, self.bbox_norm, len(self.classes), self.scale_w,\n self.scale_h, self.data_format, self.target_shape, self.frames,\n this_id - 1\n )\n valid_indices = thresholded_indices(\n output_array[\"output_cov/Sigmoid\"], len(self.classes),\n self.classes,\n self.coverage_thresholds\n )\n batchwise_boxes = []\n for image_idx, indices in enumerate(valid_indices):\n covs = output_array[\"output_cov/Sigmoid\"][image_idx, :, :, :]\n bboxes = abs_bbox[image_idx, :, :, :]\n imagewise_boxes = []\n for class_idx in [0]:\n # for class_idx in range(len(self.classes)):\n clustered_boxes = []\n cw_config = self.pproc_config.classwise_clustering_config[\n self.classes[class_idx]\n ]\n classwise_covs = covs[class_idx, :, :].flatten()\n classwise_covs = classwise_covs[indices[class_idx]]\n if classwise_covs.size == 0:\n continue\n classwise_bboxes = bboxes[4*class_idx:4*class_idx+4, :, :]\n classwise_bboxes = classwise_bboxes.reshape(\n classwise_bboxes.shape[:1] + (-1,)\n ).T[indices[class_idx]]\n pairwise_dist = \\\n 1.0 * (1.0 - iou_vectorized(classwise_bboxes))\n labeling = self.dbscan_elements[self.classes[class_idx]].fit_predict(\n X=pairwise_dist,\n sample_weight=classwise_covs\n )\n labels = np.unique(labeling[labeling >= 0])\n for label in labels:\n w = classwise_covs[labeling == label]\n aggregated_w = np.sum(w)\n w_norm = w / aggregated_w\n n = len(w)\n w_max = np.max(w)\n w_min = np.min(w)\n b = classwise_bboxes[labeling == label]\n mean_bbox = np.sum((b.T*w_norm).T, axis=0)\n mean_bbox = np.array(mean_bbox, dtype='float64')\n\n # Compute coefficient of variation of the box coords\n mean_box_w = mean_bbox[2] - mean_bbox[0]\n mean_box_h = mean_bbox[3] - mean_bbox[1]\n bbox_area = mean_box_w * mean_box_h\n valid_box = aggregated_w > cw_config.dbscan_config.\\\n dbscan_confidence_threshold and mean_box_h > cw_config.minimum_bounding_box_height\n if valid_box:\n clustered_boxes.append(\n KittiBbox(\n self.classes[class_idx], 0, 0, 0,\n mean_bbox, 0, 0, 0, 0,\n 0, 0, 0, confidence_score=np.float64(aggregated_w)\n )\n )\n else:\n continue\n imagewise_boxes.extend(clustered_boxes)\n batchwise_boxes.append(imagewise_boxes)\n\n if render:\n with pool_context(self.batch_size) as pool:\n batch_boxes_output = []\n for image_idx in range(self.batch_size):\n current_idx = (this_id - 1) * self.batch_size + image_idx\n if current_idx >= len(self.frames):\n break\n current_frame = self.frames[current_idx]\n filename = os.path.basename(current_frame._image_path)\n \n #Returns BBOX of all license plates in it\n final_bboxes = return_bbox_info(current_frame, batchwise_boxes[image_idx])\n batch_boxes_output.append([final_bboxes, filename])\n return batch_boxes_output\n" ]
[ [ "numpy.min", "numpy.unique", "sklearn.cluster.DBSCAN", "numpy.max", "numpy.float64", "numpy.array", "numpy.sum" ] ]
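The postprocessor above clusters candidate detections with DBSCAN over a pairwise (1 - IoU) distance matrix and then forms a coverage-weighted mean box per cluster. A self-contained sketch of that step on made-up boxes, using scikit-learn's precomputed-metric mode; the eps, min_samples, and coverage values here are illustrative stand-ins for what the classwise clustering config would supply:

```python
# Boxes are [x1, y1, x2, y2]; distance is 1 - IoU; each cluster is reduced
# to a coverage-weighted mean box, mirroring the apply() loop above.
import numpy as np
from sklearn.cluster import DBSCAN

def iou_matrix(boxes):
    n = len(boxes)
    out = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            x1 = max(boxes[i][0], boxes[j][0]); y1 = max(boxes[i][1], boxes[j][1])
            x2 = min(boxes[i][2], boxes[j][2]); y2 = min(boxes[i][3], boxes[j][3])
            inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
            a = (boxes[i][2] - boxes[i][0]) * (boxes[i][3] - boxes[i][1])
            b = (boxes[j][2] - boxes[j][0]) * (boxes[j][3] - boxes[j][1])
            out[i, j] = inter / (a + b - inter)
    return out

boxes = np.array([[10, 10, 50, 50], [12, 11, 52, 49], [200, 200, 240, 260]], dtype=float)
covs = np.array([0.9, 0.7, 0.8])  # per-box coverage (confidence) weights

dist = 1.0 - iou_matrix(boxes)
labels = DBSCAN(eps=0.3, min_samples=1, metric="precomputed").fit_predict(dist, sample_weight=covs)

for label in np.unique(labels[labels >= 0]):
    w = covs[labels == label]
    mean_box = np.sum(boxes[labels == label] * (w / w.sum())[:, None], axis=0)
    print(label, mean_box, w.sum())  # the first two boxes merge, the third stays alone
```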
rezoo/chainer_computational_cost
[ "987b0a2cd7670390ca0d69152214d6bc8f656c7b" ]
[ "tests/test_cost_calculators/test_connection.py" ]
[ "import chainer.functions as F\nimport numpy as np\n\nfrom chainer.functions.connection.convolution_2d \\\n import Convolution2DFunction\nfrom chainer.functions.connection.deconvolution_2d \\\n import Deconvolution2DFunction\nfrom chainer.functions.connection.linear import LinearFunction\n\nfrom helpers import calculate_cost\nfrom helpers import require_chainer_version\nfrom helpers import require_import\n\n\ndef test_conv2d_with_bias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 10, 10)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Convolution2DFunction(pad=(np.int64(1), np.int64(1)))\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=True)\n\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == (c_in * c_out * k * k * h_out * w_out)\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': 1, 'nobias': False\n }\n assert type(params['k']) is int\n assert type(params['s']) is int\n assert type(params['p']) is int\n assert type(params['d']) is int\n assert type(params['groups']) is int\n\n\ndef test_conv2d_nobias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 10, 10)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in, k, k).astype(np.float32)\n f = Convolution2DFunction(pad=1)\n flops, mr, mw, params = calculate_cost(f, [x, W], fma_1flop=True)\n assert f.apply([x, W])[0].shape == (1, c_out, h_out, w_out)\n assert flops == (c_in * c_out * k * k * h_out * w_out)\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': 1, 'nobias': True\n }\n\n\ndef test_conv2d_with_bias_no_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 10, 10)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Convolution2DFunction(pad=1)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=False)\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == 2 * (c_in * c_out * k * k * h_out * w_out)\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': 1, 'nobias': False\n }\n\n\ndef test_conv2d_nobias_no_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 10, 10)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in, k, k).astype(np.float32)\n f = Convolution2DFunction(pad=1)\n flops, mr, mw, params = calculate_cost(f, [x, W], fma_1flop=False)\n assert f.apply([x, W])[0].shape == (1, c_out, h_out, w_out)\n assert flops == 2 * (c_in * c_out - 1) * k * k * h_out * w_out\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': 1, 'nobias': True\n }\n\n\n@require_chainer_version('4.0.0')\ndef test_conv2d_grouped_with_bias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (8, 10, 10), (12, 10, 10)\n k = 3\n g = 2\n\n x = np.random.randn(1, c_in, h_in, 
w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in // g, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Convolution2DFunction(pad=1, groups=g)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=True)\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == (c_in * c_out * k * k * h_out * w_out) // g\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k // g + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': g, 'nobias': False\n }\n\n\n@require_chainer_version('4.0.0')\ndef test_conv2d_grouped_nobias_no_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (8, 10, 10), (12, 10, 10)\n k = 3\n g = 2\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_out, c_in // g, k, k).astype(np.float32)\n f = Convolution2DFunction(pad=1, groups=g)\n flops, mr, mw, params = calculate_cost(f, [x, W], fma_1flop=False)\n assert f.apply([x, W])[0].shape == (1, c_out, h_out, w_out)\n assert flops == 2 * g * (c_in * c_out // g**2 - 1) * k * k * h_out * w_out\n assert mr == c_in * h_in * w_in + c_out * c_in * k * k // g\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 1, 'p': 1, 'd': 1,\n 'groups': g, 'nobias': True\n }\n\n\ndef test_deconv2d_with_bias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 21, 21)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=True)\n\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == c_in * c_out * k * k * h_in * w_in\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': 1,\n 'groups': 1, 'nobias': False\n }\n\n\ndef test_deconv2d_with_nobias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 21, 21)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out, k, k).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0)\n flops, mr, mw, params = calculate_cost(f, [x, W], fma_1flop=True)\n assert f.apply([x, W])[0].shape == (1, c_out, h_out, w_out)\n assert flops == c_in * c_out * k * k * h_in * w_in\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': 1,\n 'groups': 1, 'nobias': True\n }\n\n\ndef test_deconv2d_with_bias_no_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 21, 21)\n k = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=False)\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == 2 * c_in * c_out * k * k * h_in * w_in\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': 1,\n 'groups': 1, 'nobias': False\n }\n\n\ndef test_deconv2d_with_nobias_no_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 21, 21)\n k = 3\n\n 
x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out, k, k).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0)\n flops, mr, mw, params = calculate_cost(f, [x, W], fma_1flop=False)\n assert f.apply([x, W])[0].shape == (1, c_out, h_out, w_out)\n assert flops == 2 * c_in * c_out * k * k * h_in * w_in\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': 1,\n 'groups': 1, 'nobias': True\n }\n\n\n@require_chainer_version('4.0.0')\ndef test_deconv2d_grouped_with_bias_fma():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (8, 10, 10), (12, 21, 21)\n k = 3\n g = 2\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out // g, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0, groups=g)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=True)\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == c_in * c_out * k * k * h_in * w_in // g\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k // g + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': 1,\n 'groups': 2, 'nobias': False\n }\n\n\n@require_chainer_version('4.0.0') # dilate option requires v4.0.0\ndef test_deconv2d_with_bias_fma_dilate():\n (c_in, h_in, w_in), (c_out, h_out, w_out) = (3, 10, 10), (12, 25, 25)\n k = 3\n d = 3\n\n x = np.random.randn(1, c_in, h_in, w_in).astype(np.float32)\n W = np.random.randn(c_in, c_out, k, k).astype(np.float32)\n b = np.random.randn(c_out).astype(np.float32)\n f = Deconvolution2DFunction(stride=2, pad=0, dilate=d)\n flops, mr, mw, params = calculate_cost(f, [x, W, b], fma_1flop=True)\n\n assert f.apply([x, W, b])[0].shape == (1, c_out, h_out, w_out)\n assert flops == c_in * c_out * k * k * h_in * w_in\n assert mr == c_in * h_in * w_in + c_in * c_out * k * k + c_out\n assert mw == c_out * h_out * w_out\n assert params == {\n 'k': k, 's': 2, 'p': 0, 'd': d,\n 'groups': 1, 'nobias': False\n }\n\n\ndef test_linear_nobias_fma():\n x = np.random.randn(1, 10).astype(np.float32)\n w = np.random.randn(20, 10).astype(np.float32)\n f = LinearFunction()\n flops, mr, mw, params = calculate_cost(f, [x, w], fma_1flop=True)\n assert flops == 10 * 20\n assert mr == 10 + 10 * 20 # input data, and weight matrix\n assert mw == 20\n assert params == {'nobias': True}\n\n\ndef test_linear_nobias_no_fma():\n x = np.random.randn(1, 10).astype(np.float32)\n w = np.random.randn(20, 10).astype(np.float32)\n f = LinearFunction()\n flops, mr, mw, params = calculate_cost(f, [x, w], fma_1flop=False)\n # for each output neuron, weight multiplication is applied 10 times and\n # addition (10-1) times.\n assert flops == (10 + 10 - 1) * 20\n assert mr == 10 + 10 * 20\n assert mw == 20\n assert params == {'nobias': True}\n\n\ndef test_linear_withbias_fma():\n x = np.random.randn(1, 10).astype(np.float32)\n w = np.random.randn(20, 10).astype(np.float32)\n b = np.random.randn(20).astype(np.float32)\n f = LinearFunction()\n flops, mr, mw, params = calculate_cost(f, [x, w, b], fma_1flop=True)\n assert flops == 10 * 20 + 20\n assert mr == 10 * 20 + 10 + 20 # input data, weight matrix, and bias\n assert mw == 20\n assert params == {'nobias': False}\n\n\n@require_import('chainer.functions.connection.shift.Shift')\ndef test_shift():\n x = np.random.randn(1, 32, 10, 10).astype(np.float32)\n f = 
F.connection.shift.Shift(ksize=3, dilate=1)\n flops, mr, mw, params = calculate_cost(f, [x])\n assert flops == 0 # exclude index calculation\n assert mr == x.size\n assert mw == x.size\n assert params == {'k': 3, 'd': 1}\n" ]
[ [ "numpy.int64", "numpy.random.randn" ] ]
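The assertions above encode a simple cost model: with a fused multiply-add counted as one flop, a 2-D convolution costs c_in * c_out * k^2 * h_out * w_out flops (divided by the group count), reads the input feature map plus the weights plus an optional bias, and writes the output feature map. A plain-Python restatement of that model for the FMA case, checked against the numbers asserted in test_conv2d_with_bias_fma:

```python
# Cost model for Convolution2DFunction with fma_1flop=True, as the tests
# above assert it. (The library's non-FMA count differs slightly: it counts
# multiplies and adds separately and drops one add per output element.)
def conv2d_cost_fma(c_in, c_out, k, h_in, w_in, h_out, w_out, groups=1, bias=True):
    flops = c_in * c_out * k * k * h_out * w_out // groups
    mread = c_in * h_in * w_in + c_out * (c_in // groups) * k * k
    if bias:
        mread += c_out
    mwrite = c_out * h_out * w_out
    return flops, mread, mwrite

# Matches test_conv2d_with_bias_fma: (3, 10, 10) -> (12, 10, 10), k=3, g=1.
flops, mr, mw = conv2d_cost_fma(3, 12, 3, 10, 10, 10, 10)
assert (flops, mr, mw) == (3 * 12 * 3 * 3 * 10 * 10, 3 * 100 + 12 * 3 * 9 + 12, 12 * 100)
print(flops, mr, mw)  # 32400 636 1200
```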
zaman13/Optoelectronic-tweezers-interface
[ "a43440b9035a69ee54c5b34fe49e8b3dd0ac8d11" ]
[ "Codes/OET_interface_v0.5.py" ]
[ "\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 15 20:34:23 2021\n\n@author: Mohammad Asif Zaman\n\n\nKeyboard commands:\n \nx : Quit/exit program \nArrow keys: Movement\nq : Increase object size\na : Decrease object size\nw : Increase width of the object\ns : Decrease width of the object\nCTRL + n : Create new object\nCTRL + 1 : Select object 1\nCTRL + 2 : Select object 2\nDelete : Delete selected object\n= : Select next object\n- : Select previous object\no : Create opening\nr : Rotate opening counter-clockwise\nf : Rotate opening clockwise\ne : Increase radius of the opening\nd : Decrease radius of the opening\nCTRL + r : Increase red color of the active object\nCTRL + g : Increase green color of the active object\nCTRL + b : Increase blue color of the active object\nALT + r : Decrease red color of the active object\nALT + g : Decrease green color of the active object\nALT + b : Decrease blue color of the active object \n\n\"\"\"\n\nimport pygame\nimport sys\nimport numpy as np\n\n\n\npygame.init()\nfps=60\nfpsclock=pygame.time.Clock()\nsur_obj=pygame.display.set_mode((1600,900),pygame.FULLSCREEN)\npygame.display.set_caption(\"Keyboard_Input\")\nWhite=(255,255,255)\nbackground_color=(0,0,0)\ndefault_foreground_color = (0,255,0)\n\npx_default = 120\npy_default = 120\nrot_default = 0\n\nradius_default = 70\nwidth_default = 6\nopening_radius_default = 10\n\nFR = [default_foreground_color[0]]\nFG = [default_foreground_color[1]]\nFB = [default_foreground_color[2]]\n\n\npx=[px_default] # center of the circles/rings\npy=[py_default] # center of the circles/rings\nrot = [0]\n\nstep=5\nstep_s=1\nstep_c = 5\nstep_rot = np.pi/100\n\n\n\ns1 = [radius_default] # circle/ring radius\n# s2 = [65,45]\ns2 = [width_default] # width of the circles/rings\n\ns3 = [opening_radius_default] # radius of the opening\n# a circle of radius s3 and color = background_color is drawn to create the opening\n\n\nlp = True\n#border_width = 5\n\n\n# This function takes the previous coordinates and keyboard input to find the new coordinates of the object position\ndef move_object(active_ind, key_input):\n    if key_input[pygame.K_LEFT]:\n        px[active_ind] -= step\n    if key_input[pygame.K_UP]:\n        py[active_ind] -= step\n    if key_input[pygame.K_RIGHT]:\n        px[active_ind] += step\n    if key_input[pygame.K_DOWN]:\n        py[active_ind] += step\n\n    return 0\n\n\n# Object scaling, rotation, color change function. 
\ndef modify_object(active_ind, key_input):\n if key_input[pygame.K_a]:\n s1[active_ind] -= step_s\n if key_input[pygame.K_q]:\n s1[active_ind] += step_s\n if key_input[pygame.K_s]:\n s2[active_ind] -= step_s\n if key_input[pygame.K_w]:\n s2[active_ind] += step_s\n if key_input[pygame.K_d]:\n s3[active_ind] -= step_s\n if key_input[pygame.K_e]:\n s3[active_ind] += step_s\n \n \n #rotate + change red color\n if key_input[pygame.K_r]:\n rot[active_ind] -= step_rot\n mods = pygame.key.get_mods()\n if mods & pygame.KMOD_CTRL:\n FR[active_ind] = FR[active_ind] + step_c if FR[active_ind] < 255 else FR[active_ind]\n if mods & pygame.KMOD_ALT:\n FR[active_ind] = FR[active_ind] - step_c if FR[active_ind] > step_c else FR[active_ind]\n\n # rotate \n if key_input[pygame.K_f]:\n rot[active_ind] += step_rot\n \n# change green color\n if key_input[pygame.K_g]:\n mods = pygame.key.get_mods()\n if mods & pygame.KMOD_CTRL:\n FG[active_ind] = FG[active_ind] + step_c if FG[active_ind] < 255 else FG[active_ind]\n if mods & pygame.KMOD_ALT:\n FG[active_ind] = FG[active_ind] - step_c if FG[active_ind] > step_c else FG[active_ind]\n\n# change blue color\n if key_input[pygame.K_b]:\n mods = pygame.key.get_mods()\n if mods & pygame.KMOD_CTRL:\n FB[active_ind] = FB[active_ind] + step_c if FB[active_ind] < 255 else FB[active_ind]\n if mods & pygame.KMOD_ALT:\n FB[active_ind] = FB[active_ind] - step_c if FB[active_ind] > step_c else FB[active_ind]\n \n \n \n \n if key_input[pygame.K_a]:\n mods = pygame.key.get_mods()\n \n if mods & pygame.KMOD_CTRL:\n s2[active_ind] = 10 + s2[active_ind]\n \n\n return 0\n\n\n\ndef rotate(x,y,xo,yo,theta): #rotate x,y around xo,yo by theta (rad)\n xr=np.cos(theta)*(x-xo)-np.sin(theta)*(y-yo) + xo\n yr=np.sin(theta)*(x-xo)+np.cos(theta)*(y-yo) + yo\n return [xr,yr]\n \n\n\ndef new_object(event,active_ind):\n if event.key == pygame.K_n:\n mods = pygame.key.get_mods()\n \n if mods & pygame.KMOD_CTRL:\n px.append(px_default)\n py.append(py_default)\n FR.append(default_foreground_color[0])\n FG.append(default_foreground_color[1])\n FB.append(default_foreground_color[2])\n s1.append(radius_default)\n s2.append(width_default)\n s3.append(opening_radius_default) \n rot.append(0)\n active_ind += 1\n \n return active_ind\n\n\n\n\n# change active object selection or delete selected object\ndef change_active_object(event,active_ind):\n # select next active object\n if event.key == pygame.K_EQUALS:\n if len(px) > active_ind + 1: \n active_ind += 1\n \n\n # select previous active object \n if event.key == pygame.K_MINUS:\n if active_ind > 0: \n active_ind -= 1 \n \n \n # delete active object\n if event.key == pygame.K_DELETE:\n del px[active_ind]\n del py[active_ind]\n del s1[active_ind]\n del s2[active_ind]\n del s3[active_ind]\n active_ind = 0\n \n \n return active_ind\n \n \n \n\n \n# Create opening in the circle\n\ndef open_circle(active_ind):\n # Draws a circle in black color to create opening in the object\n tx = px[active_ind]+s1[active_ind]-0.5*s2[active_ind] # x coordinate of the opening is set at the middle of the right rim along x axis\n ty = py[active_ind] # y coordinate of the opening is the same as the y coordinate of the object\n \n [tx,ty] = rotate(tx,ty,px[active_ind],py[active_ind],rot[active_ind])\n \n \n pygame.draw.circle(sur_obj, background_color, (tx, ty),s3[active_ind],0)\n return 0\n\n\n\ndef draw_window():\n \n for m in range(len(px)):\n # pygame.draw.rect(sur_obj, (255,0,0), (px[m], py[m], s1[m], s2[m]))\n pygame.draw.circle(sur_obj, (FR[m],FG[m],FB[m]), (px[m], 
py[m]),s1[m],s2[m])\n # pygame.draw.arc(sur_obj, (255,0,0), (px[m], py[m],s1[m],s1[m]),-3, 3, s2[m])\n # pygame.draw.arc(sur_obj, (255,0,0), (px[m]+0.4, py[m]-0.4,s1[m],s1[m]-1),-3, 3, s2[m])\n \n\n\nactive_ind = 0\n\nwhile lp:\n \n fpsclock.tick(fps)\n sur_obj.fill(background_color)\n \n key_input = pygame.key.get_pressed() \n \n \n \n move_object(active_ind, key_input)\n modify_object(active_ind, key_input)\n \n draw_window()\n \n\n\n if key_input[pygame.K_o]:\n open_circle(active_ind)\n \n if key_input[pygame.K_1]:\n mods = pygame.key.get_mods()\n \n if mods & pygame.KMOD_CTRL:\n active_ind = 0\n \n if key_input[pygame.K_2]:\n mods = pygame.key.get_mods()\n \n if mods & pygame.KMOD_CTRL:\n active_ind = 1 \n \n # The following events are addressed specifically so that multiple events aren't registered from a single click\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n active_ind = new_object(event, active_ind)\n active_ind = change_active_object(event, active_ind)\n\n\n \n\n\n\n \n \n\n \n \n # if key_input[pygame.K_5]:\n # pygame.draw.circle(sur_obj, (255,0,0), (px, py),s1)\n \n \n \n \n \n \n \n\n\n \n \n if key_input[pygame.K_x]:\n pygame.quit()\n \n \n \n pygame.display.update()\n fpsclock.tick(fps)\n \n \n\n" ]
[ [ "numpy.cos", "numpy.sin" ] ]
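The interface above positions each ring's opening with the rotate() helper, a standard 2-D rotation about an arbitrary center. A standalone check of the same formula; note that with pygame's y-axis pointing down, a mathematically counter-clockwise rotation appears clockwise on screen:

```python
# Quick check of the rotate() helper: rotating (1, 0) a quarter turn about
# the origin should give (0, 1) up to floating-point error.
import numpy as np

def rotate(x, y, xo, yo, theta):  # same formula as in the interface code
    xr = np.cos(theta) * (x - xo) - np.sin(theta) * (y - yo) + xo
    yr = np.sin(theta) * (x - xo) + np.cos(theta) * (y - yo) + yo
    return [xr, yr]

xr, yr = rotate(1.0, 0.0, 0.0, 0.0, np.pi / 2)
assert abs(xr - 0.0) < 1e-12 and abs(yr - 1.0) < 1e-12
print(xr, yr)
```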
hatim-ez/berkeley-cs294-deep-rl
[ "8ffd415e8140b18f9456bc8560a099e98456bf05" ]
[ "hw4/model_based_policy.py" ]
[ "import tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\n\nimport utils\n\n\nclass ModelBasedPolicy(object):\n\n def __init__(self,\n env,\n init_dataset,\n horizon=15,\n num_random_action_selection=4096,\n nn_layers=1,\n CEM=False):\n self._cost_fn = env.cost_fn\n self._state_dim = env.observation_space.shape[0]\n self._action_dim = env.action_space.shape[0]\n self._action_space_low = env.action_space.low\n self._action_space_high = env.action_space.high\n self._init_dataset = init_dataset\n self._horizon = horizon\n self._num_random_action_selection = num_random_action_selection\n self._nn_layers = nn_layers\n self._learning_rate = 1e-3\n self.CEM = CEM\n\n self._sess, self._state_ph, self._action_ph, self._next_state_ph,\\\n self._next_state_pred, self._loss, self._optimizer, self._best_action = self._setup_graph()\n\n def _setup_placeholders(self):\n \"\"\"\n Creates the placeholders used for training, prediction, and action selection\n\n returns:\n state_ph: current state\n action_ph: current_action\n next_state_ph: next state\n\n implementation details:\n (a) the placeholders should have 2 dimensions,\n in which the 1st dimension is variable length (i.e., None)\n \"\"\"\n ### PROBLEM 1\n ### YOUR CODE HERE\n state_ph = tf.placeholder(shape=[None, self._state_dim], name=\"st\", dtype=tf.float32)\n action_ph = tf.placeholder(shape=[None, self._action_dim], name=\"ac\", dtype=tf.float32)\n next_state_ph = tf.placeholder(shape=[None, self._state_dim], name=\"next_st\", dtype=tf.float32)\n\n return state_ph, action_ph, next_state_ph\n\n def _dynamics_func(self, state, action, reuse):\n \"\"\"\n Takes as input a state and action, and predicts the next state\n\n returns:\n next_state_pred: predicted next state\n\n implementation details (in order):\n (a) Normalize both the state and action by using the statistics of self._init_dataset and\n the utils.normalize function\n (b) Concatenate the normalized state and action\n (c) Pass the concatenated, normalized state-action tensor through a neural network with\n self._nn_layers number of layers using the function utils.build_mlp. The resulting output\n is the normalized predicted difference between the next state and the current state\n (d) Unnormalize the delta state prediction, and add it to the current state in order to produce\n the predicted next state\n\n \"\"\"\n ### PROBLEM 1\n ### YOUR CODE HERE\n # self._dynamics_func is supposed to take in a batch. 
The input state is assumed to be [None, self._state_dim].\n state_mean, state_std = self._init_dataset.state_mean, self._init_dataset.state_std\n action_mean, action_std = self._init_dataset.action_mean, self._init_dataset.action_std\n normalized_state = utils.normalize(state, mean=state_mean, std=state_std)\n normalized_action = utils.normalize(action, mean=action_mean, std=action_std)\n input_nn = tf.concat(values=[normalized_state, normalized_action], axis=1)\n #tf.concat([normalized_state, normalized_action], axis=-1) # np.concatenate(normalized_state, normalized_action)\n\n # print(\"unnormlaized state: \", state)\n # print(\"state: \", normalized_state.shape)\n # print(\"state: \", normalized_state)\n # print(\"action: \", normalized_action.shape)\n # print(\"type: \", type(normalized_action))\n # print(\"input_nn: : \", input_nn)\n\n normalized_delta_state_pred = utils.build_mlp(input_layer=input_nn,\n output_dim=self._state_dim,\n scope=\"dynamics_model\",\n n_layers=self._nn_layers,\n reuse=reuse)\n\n delta_state_pred = utils.unnormalize(normalized_delta_state_pred, self._init_dataset.delta_state_mean, self._init_dataset.delta_state_std)\n next_state_pred = tf.add(state, delta_state_pred)\n\n return next_state_pred\n\n def _setup_training(self, state_ph, next_state_ph, next_state_pred):\n \"\"\"\n Takes as input the current state, next state, and predicted next state, and returns\n the loss and optimizer for training the dynamics model\n\n returns:\n loss: Scalar loss tensor\n optimizer: Operation used to perform gradient descent\n\n implementation details (in order):\n (a) Compute both the actual state difference and the predicted state difference\n (b) Normalize both of these state differences by using the statistics of self._init_dataset and\n the utils.normalize function\n (c) The loss function is the mean-squared-error between the normalized state difference and\n normalized predicted state difference\n (d) Create the optimizer by minimizing the loss using the Adam optimizer with self._learning_rate\n\n \"\"\"\n ### PROBLEM 1\n ### YOUR CODE HERE\n\n delta_state = tf.subtract(next_state_ph, state_ph)\n delta_state_pred = tf.subtract(next_state_pred, state_ph)\n\n delta_state_mean = self._init_dataset.delta_state_mean\n delta_state_std = self._init_dataset.delta_state_std\n\n normalized_delta_state = utils.normalize(delta_state, delta_state_mean, delta_state_std)\n normalized_delta_state_pred = utils.normalize(delta_state_pred, delta_state_mean, delta_state_std)\n\n loss = tf.losses.mean_squared_error(normalized_delta_state, normalized_delta_state_pred)\n\n optimizer = tf.train.AdamOptimizer(self._learning_rate).minimize(loss)\n\n return loss, optimizer\n\n def _setup_action_selection(self, state_ph):\n \"\"\"\n Computes the best action from the current state by using randomly sampled action sequences\n to predict future states, evaluating these predictions according to a cost function,\n selecting the action sequence with the lowest cost, and returning the first action in that sequence\n\n returns:\n best_action: the action that minimizes the cost function (tensor with shape [self._action_dim])\n\n implementation details (in order):\n (a) We will assume state_ph has a batch size of 1 whenever action selection is performed\n (b) Randomly sample uniformly self._num_random_action_selection number of action sequences,\n each of length self._horizon\n (c) Starting from the input state, unroll each action sequence using your neural network\n dynamics model\n (d) While unrolling the 
action sequences, keep track of the cost of each action sequence\n using self._cost_fn\n (e) Find the action sequence with the lowest cost, and return the first action in that sequence\n\n Hints:\n (i) self._cost_fn takes three arguments: states, actions, and next states. These arguments are\n 2-dimensional tensors, where the 1st dimension is the batch size and the 2nd dimension is the\n state or action size\n (ii) You should call self._dynamics_func and self._cost_fn a total of self._horizon times\n (iii) Use tf.random_uniform(...) to generate the random action sequences\n\n \"\"\"\n ### PROBLEM 2\n ### YOUR CODE HERE\n\n if self.CEM:\n mu = (self._action_space_high + self._action_space_low)[0] / 2\n sigma = (self._action_space_high - self._action_space_low)[0] / 4\n noise = 1/100\n n_best_to_keep = self._num_random_action_selection//100\n epsilon = 1\n max_iter = 10\n\n costs = tf.zeros([self._num_random_action_selection])\n states = tf.concat([state_ph for i in range(self._num_random_action_selection)], axis=0)\n\n i = 0\n print('Starting Cross Entropy Method...')\n while i < max_iter: # or tf.reduce_max(sigma) > epsilon\n i += 1\n # Resample values farther than 2 sigma from mean.\n random_action_sequences = tf.truncated_normal(\n shape=[self._num_random_action_selection, self._horizon, self._action_dim],\n mean=mu,\n stddev=sigma\n )\n\n # unstack over the horizon axis to compute call self._dynamics_func and self._cost_fn a total of self._horizon times.\n for actions in tf.unstack(random_action_sequences, axis=1):\n next_states = self._dynamics_func(state=states, action=actions, reuse=True)\n costs = tf.add(costs, self._cost_fn(states=states, actions=actions, next_states=next_states))\n states = next_states\n\n indices_to_keep = tf.contrib.framework.argsort(\n costs,\n axis=0,\n direction='ASCENDING',\n stable=False,\n name=None\n )[0:n_best_to_keep]\n\n\n mu, sigma = tf.nn.moments(tf.gather(params=random_action_sequences, indices=indices_to_keep, axis=0)[:,0,:], axes=0) #, name=\"moments\")\n\n print('Cross Entropy Method update {0}.'.format(i))\n\n\n\n index_cost_min = tf.argmin(costs)\n best_action = random_action_sequences[index_cost_min][0]\n print('Best action selected.')\n\n\n else:\n costs = tf.zeros([self._num_random_action_selection])\n states = tf.concat([state_ph for i in range(self._num_random_action_selection)], axis=0)\n random_action_sequences = tf.random_uniform(shape=[self._num_random_action_selection, self._horizon, self._action_dim],\n minval=self._action_space_low,\n maxval=self._action_space_high)\n\n # unstack over the horizon axis to compute call self._dynamics_func and self._cost_fn a total of self._horizon times.\n for actions in tf.unstack(random_action_sequences, axis=1):\n next_states = self._dynamics_func(state=states, action=actions, reuse=True)\n costs = tf.add(costs, self._cost_fn(states=states, actions=actions, next_states=next_states))\n states = next_states\n\n # assert len(costs.get_shape()) == 1\n # assert costs.get_shape()[0].value == self._num_random_action_selection\n index_cost_min = tf.argmin(costs)\n best_action = random_action_sequences[index_cost_min][0]\n\n return best_action\n\n def _setup_graph(self):\n \"\"\"\n Sets up the tensorflow computation graph for training, prediction, and action selection\n\n The variables returned will be set as class attributes (see __init__)\n \"\"\"\n sess = tf.Session()\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n state_ph, action_ph, next_state_ph = self._setup_placeholders()\n next_state_pred = 
self._dynamics_func(state=state_ph, action=action_ph, reuse=False)\n\n loss, optimizer = self._setup_training(state_ph=state_ph, next_state_ph=next_state_ph, next_state_pred=next_state_pred)\n ### PROBLEM 2\n ### YOUR CODE HERE\n best_action = self._setup_action_selection(state_ph=state_ph)\n\n sess.run(tf.global_variables_initializer())\n\n return sess, state_ph, action_ph, next_state_ph, \\\n next_state_pred, loss, optimizer, best_action\n\n def train_step(self, states, actions, next_states):\n \"\"\"\n Performs one step of gradient descent\n\n returns:\n loss: the loss from performing gradient descent\n \"\"\"\n ### PROBLEM 1\n ### YOUR CODE HERE\n _, loss = self._sess.run([self._optimizer, self._loss], feed_dict={self._state_ph: states, self._action_ph: actions, self._next_state_ph: next_states})\n\n return loss\n\n def predict(self, state, action):\n \"\"\"\n Predicts the next state given the current state and action\n\n returns:\n next_state_pred: predicted next state\n\n implementation detils:\n (i) The state and action arguments are 1-dimensional vectors (NO batch dimension)\n \"\"\"\n assert np.shape(state) == (self._state_dim,)\n assert np.shape(action) == (self._action_dim,)\n\n ### PROBLEM 1\n ### YOUR CODE HERE\n next_state_pred = self._sess.run(self._next_state_pred, feed_dict={self._state_ph: [state], self._action_ph: [action]})\n next_state_pred = np.squeeze(next_state_pred)\n\n assert np.shape(next_state_pred) == (self._state_dim,)\n return next_state_pred\n\n def get_action(self, state):\n \"\"\"\n Computes the action that minimizes the cost function given the current state\n\n returns:\n best_action: the best action\n \"\"\"\n assert np.shape(state) == (self._state_dim,)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n best_action = self._sess.run(self._best_action, feed_dict={self._state_ph: [state]})\n # mu = [var for var in tf.global_variables() if var.op.name==\"moments_24/Squeeze:0\"][0]\n # sigma = [var for var in tf.global_variables() if var.op.name==\"moments_24/Squeeze_1:0\"][0]\n # print(\"Actions mean: {0} \\n and std: {1}\".format(mu, sigma))\n\n assert np.shape(best_action) == (self._action_dim,)\n return best_action\n" ]
[ [ "tensorflow.losses.mean_squared_error", "tensorflow.concat", "tensorflow.truncated_normal", "tensorflow.unstack", "tensorflow.zeros", "numpy.squeeze", "tensorflow.placeholder", "tensorflow.contrib.framework.argsort", "tensorflow.subtract", "tensorflow.global_variables_initializer", "tensorflow.gather", "tensorflow.add", "numpy.shape", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.random_uniform", "tensorflow.argmin" ] ]
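_setup_action_selection above implements random-shooting MPC: sample uniform action sequences, unroll each through the dynamics model while accumulating cost, and return the first action of the cheapest sequence. A NumPy sketch of the same loop; the dynamics and cost functions below are toy stand-ins (assumptions, not the learned model or env.cost_fn), chosen so the expected answer is obvious:

```python
# Random-shooting action selection: uniform action sequences, batched
# rollouts, cumulative cost, argmin, first action of the best sequence.
import numpy as np

rng = np.random.default_rng(0)
horizon, n_seq, action_dim = 15, 4096, 1

def dynamics(state, action):           # toy stand-in for the learned model
    return state + 0.1 * action

def cost(state, action, next_state):   # toy stand-in for env.cost_fn
    return next_state**2 + 0.01 * action**2

state = np.full((n_seq, action_dim), 3.0)
actions = rng.uniform(-1.0, 1.0, size=(n_seq, horizon, action_dim))
costs = np.zeros(n_seq)

for t in range(horizon):
    next_state = dynamics(state, actions[:, t])
    costs += cost(state, actions[:, t], next_state).sum(axis=1)
    state = next_state

best_action = actions[np.argmin(costs), 0]
print(best_action)  # should push the state toward zero, i.e. be close to -1
```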
itprorh66/SolarPV-Simulator
[ "0e689e608d4c1888dde82f506ad42c3291f33f60" ]
[ "SolarPV/NasaData.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 26 19:11:58 2018\nModified on 02/22/2019 for version 0.1.0\nModified on 02/04/2021 to simplify the logic and make better use of Pandas methods\n\n@author: Bob Hentz\n\n-------------------------------------------------------------------------------\n Name: NasaData.py\n Purpose: Retrieve Site specific information from Nasa Power Site using https request\n protocol\n \n Sample SinglePoint Data Request: \n https://asdc-arcgis.larc.nasa.gov/cgi-bin/power/v1beta/DataAccess.py?\n request=execute&identifier=SinglePoint&parameters=T2M,PS,\n ALLSKY_SFC_SW_DWN&\n startDate=20160301&endDate=20160331&\n userCommunity=SSE&tempAverage=DAILY&\n outputList=JSON,ASCII&lat=36&lon=45&user=anonymous\n\n Copyright: (c) Bob Hentz 2018\n License: GNU General Public License, version 3 (GPL-3.0)\n\n This program is distributed WITHOUT ANY WARRANTY;\n without even the implied warranty of MERCHANTABILITY\n or FITNESS FOR A PARTICULAR PURPOSE.\n -------------------------------------------------------------------------------\n\"\"\"\n\nimport pandas as pd\nimport requests\nimport datetime as dt\n\n\n\"\"\" BaseURL defines the NASA site used to retrieve Lat/Lon specific data \"\"\"\nBaseURL = 'https://power.larc.nasa.gov/cgi-bin/v1/DataAccess.py?'\n\n\ndef getLocationData(dtin):\n \"\"\" Retrieves the NASA Location data from the request response \n Returns tuple of form (Lon, Lat' Elev) \"\"\"\n schval = 'coordinates\\\": ['\n stpt = dtin.find(schval)\n ndpt = dtin.find(']', stpt+len(schval))\n ln = []\n for itm in dtin[stpt+len(schval):ndpt].rstrip().split(','):\n ln.append(float(itm.strip('\\n ')))\n return ln\n\n \ndef getSiteElevation(lat, lon):\n baseURL = BaseURL\n baseReq = 'request=execute&identifier=SinglePoint&parameters=T2M'\n dateSel = dateSel = '&startDate=20140101&endDate=20140101&userCommunity=SSE'\n outSel = '&tempAverage=DAILY&outputList=JSON,ASCII&'\n locSel = 'lat={0}&lon={1}&user=anonymous'.format(lat, lon)\n cmd = baseURL + baseReq + dateSel + outSel + locSel\n # Request NASA Data from API\n try:\n data = requests.get(cmd).text\n return getLocationData(data)\n except requests.exceptions.ConnectionError:\n return [None, None, None]\n \ndef formulateRequest(lat, lon, selectparms= None):\n \"\"\" Formulate a request from NASA API for 10 years of atmospheric data \n required to prepare daily statistical data used in Solar Insolation\n calculations \"\"\"\n baseURL = BaseURL\n baseReq = 'request=execute&identifier=SinglePoint&parameters='\n stdparms = [('T10M','Temperature @ 10m (c)'), \n ('T10M_MAX', 'Max Daily Temperature (c)'),\n ('T10M_MIN', 'Min Daily Temperature (c)'),\n ('WS10M','Surface Wind Speed (m/s)'),\n ('WS10M_MAX','Max Daily Wind Speed (m/s)'),\n ('WS10M_MIN','Min Daily Wind Speed (m/s)')\n ] \n now = dt.date.today()\n baseyear = now.year-1\n startdate='{0}0101'.format(baseyear-9)\n enddate ='{0}1231'.format(baseyear)\n # build request parameters\n parms = []\n for itm in stdparms:\n if selectparms == None or itm[0] in selectparms:\n parms.append(itm[0])\n reqparms = ''\n for p in range(len(parms)):\n if p > 0:\n reqparms += ','\n reqparms += parms[p] \n dateSel = '&startDate={0}&endDate={1}&userCommunity=SSE'.format(\n startdate, enddate)\n outSel = '&tempAverage=DAILY&outputList=JSON,ASCII&'\n locSel = 'lat={0}&lon={1}&user=anonymous'.format(lat, lon)\n cmd = baseURL + baseReq + reqparms + dateSel + outSel + locSel \n return (cmd, reqparms.split(','))\n\n\ndef LoadNasaData(lat, lon, show= False, selectparms= 
None): \n    \"\"\" Execute a request from NASA API for 10 years of atmospheric data \n        required to prepare daily statistical data used in Solar Insolation\n        calculations \"\"\"\n    cmd = formulateRequest(lat, lon, selectparms)\n    jdi = requests.get(cmd[0]).json()\n    cols = cmd[1]\n    df = pd.json_normalize(jdi['features'][0]['properties']['parameter'][cols[0]]).T\n    df.index = pd.to_datetime(df.index)\n    df.rename(columns={0: cols[0]}, inplace= True)\n    for c in cols[1:]:\n        dfc = pd.json_normalize(jdi['features'][0]['properties']['parameter'][c]).T\n        dfc.index = pd.to_datetime(dfc.index)\n        dfc.rename(columns={0: c}, inplace= True)\n        df = df.join(dfc)\n    df['DayofYear'] = df.index.dayofyear\n    df = df[df['DayofYear'] != 366] #drop a day for leap years\n    atmo_dict = dict()\n    dg = df.groupby('DayofYear')\n    for col in cols:\n        dp = pd.DataFrame(dg[col].min())\n        dp.rename(columns={col: 'Min'}, inplace= True)\n        atmo_dict[col] = dp\n        dp = pd.DataFrame(dg[col].max())\n        dp.rename(columns={col: 'Max'}, inplace= True)\n        atmo_dict[col] = atmo_dict[col].join(dp)\n        dp = pd.DataFrame(dg[col].mean())\n        dp.rename(columns={col: 'S-Mean'}, inplace= True)\n        atmo_dict[col] = atmo_dict[col].join(dp)\n        dp = pd.DataFrame(dg[col].std())\n        dp.rename(columns={col: 'STDV'}, inplace= True)\n        atmo_dict[col] = atmo_dict[col].join(dp) \n    return atmo_dict\n\n\ndef main():\n\n#    find_parms = ['ALLSKY_SFC_SW_DWN', 'PS']\n    d_dict = LoadNasaData(-0.2739, 36.3765, show = False) \n    tav = d_dict['T10M']['S-Mean'].values\n    tmx = d_dict['T10M_MAX']['S-Mean'].values\n    tmn = d_dict['T10M_MIN']['S-Mean'].values\n    wav = d_dict['WS10M']['S-Mean'].values\n    wmx = d_dict['WS10M_MAX']['S-Mean'].values\n    wmn = d_dict['WS10M_MIN']['S-Mean'].values\n    \n    for i in range(10):\n        st = 'Day: {0}\\tAvg Temp: {1:.2f}\\tMax Temp: {2:.2f}\\tMin Temp: {3:.2f}\\n'.format(i, tav[i], tmx[i], tmn[i])\n        sw = '\\tAvg WS: {0:.2f}\\tMax WS: {1:.2f}\\tMin WSp: {2:.2f}'.format(wav[i], wmx[i], wmn[i])\n        st += sw\n        print(st)\n\n\nif __name__ == '__main__':\n    main() \n" ]
[ [ "pandas.json_normalize", "pandas.to_datetime" ] ]
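LoadNasaData above builds its per-day-of-year Min/Max/S-Mean/STDV tables by repeatedly joining single-statistic frames; the same result can come from one groupby().agg() call. A sketch on synthetic data, where a fabricated T10M series stands in for the NASA POWER response so no network access is needed:

```python
# Per-day-of-year statistics in one agg() call, on synthetic temperatures.
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
idx = pd.date_range("2010-01-01", "2019-12-31", freq="D")
df = pd.DataFrame({"T10M": 20 + 5 * rng.standard_normal(len(idx))}, index=idx)

df["DayofYear"] = df.index.dayofyear
df = df[df["DayofYear"] != 366]  # drop the leap day, as LoadNasaData does

stats = df.groupby("DayofYear")["T10M"].agg(["min", "max", "mean", "std"])
stats.columns = ["Min", "Max", "S-Mean", "STDV"]  # match the column names above
print(stats.head())
```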
FredaXin/eda_and_beyond
[ "d78d25c305f1a23f1568d420ba8bb6bee12e5c38" ]
[ "eda_and_beyond/eda_tools.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.linear_model import LinearRegression, LassoCV, RidgeCV\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score\nfrom sklearn.dummy import DummyRegressor\n\n\n\n\"\"\"\nNAME \n eda_tools\n\nDESCRIPTION\n This module provides functions to automate common procedures in EDA, model\n preparation, and data visualization process. \n\nMODULE CONTENTS\n inspect_dupes\n inspect_nans\n view_columns_w_many_nans\n drop_columns_w_many_nans\n histograms_numeric_columns\n boxplots_categorical_columns\n scatter_plots\n heatmap_numeric_w_dependent_variable\n high_corr_w_dependent_variable\n high_corr_among_independent_variable\n categorical_to_ordinal_transformer\n transform_categorical_to_numercial\n dummify_categorical_columns\n conform_columns\n viz_resids\n print_error_metrics\n\"\"\"\n\n\n\ndef inspect_dupes(df, dedupe=False):\n '''\n Checks duplicates (rows), and gets rid of duplicates if dedupe arg set to 'True' \n Arg: dataframe, dedupe (bool)\n '''\n num_of_dupe = len(df[df.duplicated()])\n\n if dedupe and num_of_dupe>0: \n df.drop_duplicates(inplace=True)\n print(f'Number of duplicates found: {num_of_dupe}')\n return df\n\n else: \n print(f'Number of duplicates found: {num_of_dupe}')\n return num_of_dupe\n\n\n\ndef inspect_nans(df): \n '''\n Check number and percentage of NaN\n Arg: dataframe\n '''\n num_of_nan = df.isnull().sum().sum()\n\n if num_of_nan > 0:\n mask_total = df.isnull().sum().sort_values(ascending=False) \n number = mask_total[mask_total > 0]\n\n mask_percent = df.isnull().mean().sort_values(ascending=False) \n percent = mask_percent[mask_percent > 0] \n\n missing_data = pd.concat([number, percent], axis=1, keys=['Number_of_NaN', 'Percent_of_NaN'])\n print(f'Number and Percentage of NaN:\\n {missing_data}')\n else: \n print('No NaN found.')\n \n return num_of_nan\n\n\n\ndef view_columns_w_many_nans(df, missing_percent=.9):\n '''\n Checks which columns have over specified percentage of missing\n values \n Args: dataframe, missing percentage (default=.9)\n Returns columns (list)\n '''\n mask_percent = df.isnull().mean()\n series = mask_percent[mask_percent > missing_percent]\n columns = series.index.to_list()\n print(columns) \n return columns\n\n\n\ndef drop_columns_w_many_nans(df, missing_percent=.9):\n '''\n Drops the columns whose missing value are bigger than the specified missing percentage\n Args: dataframe, missing percentage (default=.9)\n Returns dataframe\n '''\n list_of_cols = view_columns_w_many_nans(df, missing_percent=missing_percent)\n df.drop(columns=list_of_cols, inplace=True)\n print(list_of_cols, 'Caution: df has been mutated!')\n return df\n\n\n\n# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data\n# Reference: https://seaborn.pydata.org/tutorial/axis_grids.html\ndef histograms_numeric_columns(df, numerical_columns):\n '''\n Args: dataframe, numerical columns (list)\n Returns group histagrams\n '''\n f = pd.melt(df, value_vars=numerical_columns) \n g = sns.FacetGrid(f, col='variable', col_wrap=4, sharex=False, sharey=False)\n g = g.map(sns.distplot, 'value')\n return g\n\n\n\n# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data\ndef boxplots_categorical_columns(df, categorical_columns, dependant_variable):\n 
'''\n Args: dataframe, categorical columns (list), dependant variable (str)\n Returns group boxplots of correlations between categorical varibles and dependant variable\n '''\n def boxplot(x, y, **kwargs):\n sns.boxplot(x=x, y=y)\n x=plt.xticks(rotation=90)\n\n f = pd.melt(df, id_vars=[dependant_variable], value_vars=categorical_columns)\n g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False, height=10)\n g = g.map(boxplot, 'value', dependant_variable)\n return g\n\n\n\ndef scatter_plots(df, numerical_cols, target_col):\n '''\n Args: dataframe, numerical columns (list), target column (str)\n '''\n # Calculate the number of rows\n num_rows = (len(numerical_cols) // 3) + 1\n # Generate a 3 x n subplots frame\n fix, ax = plt.subplots(num_rows, 3, sharey='row', figsize=(15,20))\n\n # Reference: https://stackoverflow.com/a/434328\n # Define a function to iterate through a list and divide them into chunks\n def chunker(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n \n # Iterate through numerical_cols and generate each subplot\n for y, plot_group in enumerate(chunker((numerical_cols), 3)):\n for x, col in enumerate(plot_group):\n sub_ax = ax[y][x]\n sub_ax.scatter(df[col], df[target_col], s=2)\n sub_ax.set_title(col)\n \n\n\ndef heatmap_numeric_w_dependent_variable(df, dependent_variable):\n '''\n Args: dataframe, dependant variable (str)\n Returns heatmap of independent variables' correlations with dependent variable \n '''\n plt.figure(figsize=(8, 10))\n g = sns.heatmap(df.corr()[[dependent_variable]].sort_values(by=dependent_variable), \n annot=True, \n cmap='coolwarm', \n vmin=-1,\n vmax=1) \n return g\n\n\n\ndef high_corr_w_dependent_variable(df, dependent_variable, corr_value):\n '''\n Args: dataframe, dependent variable (str), and value of correlation (float)\n Returns dataframe of independant varibles that are highly (e.g. abs(corr) > 0.4) with dependent varible\n '''\n temp_df = df.corr()[[dependent_variable]].sort_values(by=dependent_variable, ascending=False)\n mask_1 = abs(temp_df[dependent_variable]) > corr_value\n return temp_df.loc[mask_1]\n\n\n\ndef high_corr_among_independent_variable(df, dependent_variable, corr_value):\n '''\n Checks correlation among independant varibles, and checks which two features have strong correlation\n Args: dataframe, dependent variable, and value of correlation \n Returns dictionary \n '''\n df_corr = df.drop(columns=[dependent_variable]).corr()\n corr_dict = df_corr.to_dict()\n temp_dict = {key_1: {key_2 : value \n for key_2, value in imbeded_dictionary.items() \n if abs(value) < 1 and abs(value) > corr_value}\n for key_1, imbeded_dictionary in corr_dict.items()}\n return {k:v for k, v in temp_dict.items() if v}\n\n\n\ndef categorical_to_ordinal_transformer(categories):\n '''\n Returns a function that will map categories to ordinal values based on the\n order of the list of `categories` given. 
\n Example: \n If categories is ['A', 'B', 'C'] then the transformer will map \n 'A' -> 0, 'B' -> 1, 'C' -> 2.\n '''\n return lambda categorical_value: categories.index(categorical_value)\n\n\n\ndef transform_categorical_to_numercial(df, categorical_numerical_mapping):\n '''\n Transforms categorical columns to numerical columns\n Args: dataframe, dictionary \n Returns dataframe\n '''\n transformers = {k: categorical_to_ordinal_transformer(v) \n for k, v in categorical_numerical_mapping.items()}\n new_df = df.copy()\n for col, transformer in transformers.items():\n new_df[col] = new_df[col].map(transformer).astype('int64')\n return new_df\n\n\n\ndef dummify_categorical_columns(df):\n '''\n Dummifies all categorical columns\n Args: dataframe\n Returns dataframe\n '''\n categorical_columns = df.select_dtypes(include=\"object\").columns\n return pd.get_dummies(df, columns=categorical_columns, drop_first=True)\n\n\n\ndef conform_columns(df_reference, df):\n '''\n Drops columns in dataframe that are not in the reference dataframe\n Args: dataframe as reference, dataframe\n Returns dataframe\n '''\n to_drop = [c for c in df.columns if c not in df_reference.columns]\n return df.drop(to_drop, axis=1)\n\n\n\ndef viz_resids(model_title, X, y, random_state_number=42):\n '''\n Thanks to Mahdi Shadkam-Farrokhi for creating this visualization function!\n Args: model title (str), X(features), y(target)\n Returns 3 error plots \n '''\n \n # For help with multiple figures: https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/subplots_demo.html\n\n # HANDLING DATA\n # train/test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state_number)\n\n # instatiate model\n lr = LinearRegression()\n # fit model\n lr.fit(X_train, y_train)\n\n preds = lr.predict(X_test)\n resids = y_test - preds\n target_name = y.name.capitalize()\n\n # HANDLING SUBPLOTS\n fig, axes = plt.subplots(2, 2, figsize=(12,10)) # 2 row x 2 columns\n fig.suptitle(f\"{model_title}: $R^2$ test ={lr.score(X_test, y_test):2.2%}\", fontsize = 24, y = 1.05)\n\n ax_1 = axes[0][0]\n ax_2 = axes[0][1]\n ax_3 = axes[1][0]\n\n subplot_title_size = 18\n subplot_label_size = 14\n \n # 1ST PLOT - y_true vs. y_pred\n ax_1.set_title(\"True Values ($y$) vs. 
Predictions ($\\hat{y}$)\", fontsize = subplot_title_size, pad = 10)\n maxDist = max(max(preds),max(y)) # maximum value used to determine x_lim and y_lim\n minDist = min(min(preds),min(y)) # minimum value used to determine x_lim and y_lim\n # 45deg line, signifying prediction == true value\n ax_1.plot((minDist,maxDist),(minDist,maxDist), c = \"r\", alpha = .7);\n \n sns.scatterplot(ax = ax_1, x = y_test, y = preds, alpha = .5)\n ax_1.set_xlabel(\"True Values ($y$)\", fontsize = subplot_label_size, labelpad = 10)\n ax_1.set_ylabel(\"Predictions ($\\hat{y}$)\", fontsize = subplot_label_size, labelpad = 10)\n\n # 2ND PLOT - residuals\n ax_2.set_title(\"Residuals\", fontsize = subplot_title_size)\n sns.scatterplot(ax = ax_2, x = range(len(resids)),y = resids, alpha = .5)\n ax_2.set_ylabel(target_name, fontsize = subplot_label_size)\n ax_2.axhline(0, c = \"r\", alpha = .7);\n\n # 3RD PLOT - residuals histogram\n ax_3.set_title(\"Histogram of residuals\", fontsize = subplot_title_size)\n sns.distplot(resids, ax = ax_3, kde = False);\n ax_3.set_xlabel(target_name, fontsize = subplot_label_size)\n ax_3.set_ylabel(\"Frequency\", fontsize = subplot_label_size)\n\n plt.tight_layout() # handles most overlapping and spacing issues\n\n\n\ndef print_error_metrics(y_true, y_preds, n, k):\n '''\n Args: y_true, y_preds, \n n: the number of observations.\n k: the number of independent variables, excluding the constant.\n Prints 6 error metrics\n '''\n def r2_adj(y_true, y_preds, n, k):\n rss = np.sum((y_true - y_preds)**2)\n null_model = np.sum((y_true - np.mean(y_true))**2)\n r2 = 1 - rss/null_model\n r2_adj = 1 - ((1-r2)*(n-1))/(n-k-1)\n return r2_adj\n \n print('Mean Square Error: ', mean_squared_error(y_true, y_preds))\n print('Root Mean Square Error: ', np.sqrt(mean_squared_error(y_true, y_preds)))\n print('Mean absolute error: ', mean_absolute_error(y_true, y_preds))\n print('Median absolute error: ', median_absolute_error(y_true, y_preds))\n print('R^2 score:', r2_score(y_true, y_preds))\n print('Adjusted R^2 score:', r2_adj(y_true, y_preds, n, k))\n\n\n\n\n\n\n" ]
[ [ "pandas.concat", "matplotlib.pyplot.tight_layout", "numpy.sum", "sklearn.metrics.r2_score", "sklearn.metrics.median_absolute_error", "matplotlib.pyplot.figure", "sklearn.metrics.mean_absolute_error", "matplotlib.pyplot.subplots", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error", "numpy.mean", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xticks", "pandas.melt", "pandas.get_dummies" ] ]
alwinw/sktime
[ "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9" ]
[ "sktime/transformers/series_as_features/dictionary_based/_sax.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\n\nfrom sktime.transformers.series_as_features.base import BaseSeriesAsFeaturesTransformer\nfrom sktime.transformers.series_as_features.dictionary_based import PAA\n\n# TO DO: verify this returned pandas is consistent with sktime\n# definition. Timestamps?\nfrom sktime.utils.validation.series_as_features import check_X\n\n# from numba import types\n# from numba.experimental import jitclass\n\n__author__ = \"Matthew Middlehurst\"\n\n\nclass SAX(BaseSeriesAsFeaturesTransformer):\n \"\"\"SAX (Symbolic Aggregate approXimation) Transformer, as described in\n Jessica Lin, Eamonn Keogh, Li Wei and Stefano Lonardi,\n \"Experiencing SAX: a novel symbolic representation of time series\"\n Data Mining and Knowledge Discovery, 15(2):107-144\n Overview: for each series:\n run a sliding window across the series\n for each window\n shorten the series with PAA (Piecewise Approximate Aggregation)\n discretise the shortened series into fixed bins\n form a word from these discrete values\n by default SAX produces a single word per series (window_size=0).\n SAX returns a pandas data frame where column 0 is the histogram (sparse\n pd.series)\n of each series.\n\n Parameters\n ----------\n word_length: int, length of word to shorten window to (using\n PAA) (default 8)\n alphabet_size: int, number of values to discretise each value\n to (default to 4)\n window_size: int, size of window for sliding. Input series\n length for whole series transform (default to 12)\n remove_repeat_words: boolean, whether to use numerosity reduction (\n default False)\n save_words: boolean, whether to use numerosity reduction (\n default False)\n\n return_pandas_data_series: boolean, default = True\n set to true to return Pandas Series as a result of transform.\n setting to true reduces speed significantly but is required for\n automatic test.\n\n Attributes\n ----------\n words: histor = []\n\n \"\"\"\n\n def __init__(\n self,\n word_length=8,\n alphabet_size=4,\n window_size=12,\n remove_repeat_words=False,\n save_words=False,\n return_pandas_data_series=True,\n ):\n self.word_length = word_length\n self.alphabet_size = alphabet_size\n self.window_size = window_size\n self.remove_repeat_words = remove_repeat_words\n self.save_words = save_words\n self.return_pandas_data_series = return_pandas_data_series\n self.words = []\n\n super(SAX, self).__init__()\n\n def transform(self, X, y=None):\n \"\"\"\n\n Parameters\n ----------\n X : nested pandas DataFrame of shape [n_instances, 1]\n Nested dataframe with univariate time-series in cells.\n\n Returns\n -------\n dims: Pandas data frame with first dimension in column zero\n \"\"\"\n self.check_is_fitted()\n X = check_X(X, enforce_univariate=True, coerce_to_numpy=True)\n X = X.squeeze(1)\n\n if self.alphabet_size < 2 or self.alphabet_size > 4:\n raise RuntimeError(\"Alphabet size must be an integer between 2 and 4\")\n if self.word_length < 1 or self.word_length > 16:\n raise RuntimeError(\"Word length must be an integer between 1 and 16\")\n\n breakpoints = self._generate_breakpoints()\n n_instances, series_length = X.shape\n\n bags = pd.DataFrame()\n dim = []\n\n for i in range(n_instances):\n bag = {}\n lastWord = -1\n\n words = []\n\n num_windows_per_inst = series_length - self.window_size + 1\n split = np.array(\n X[\n i,\n np.arange(self.window_size)[None, :]\n + np.arange(num_windows_per_inst)[:, None],\n ]\n )\n\n split = scipy.stats.zscore(split, axis=1)\n\n paa = 
PAA(num_intervals=self.word_length)\n data = pd.DataFrame()\n data[0] = [pd.Series(x, dtype=np.float32) for x in split]\n patterns = paa.fit_transform(data)\n patterns = np.asarray([a.values for a in patterns.iloc[:, 0]])\n\n for n in range(patterns.shape[0]):\n pattern = patterns[n, :]\n word = self._create_word(pattern, breakpoints)\n words.append(word)\n # _add_to_bag returns a boolean, so only update lastWord when the\n # word was actually added; assigning the boolean itself would break\n # the numerosity-reduction comparison on the next iteration.\n if self._add_to_bag(bag, word, lastWord):\n lastWord = word\n\n if self.save_words:\n self.words.append(words)\n\n dim.append(pd.Series(bag) if self.return_pandas_data_series else bag)\n\n bags[0] = dim\n\n return bags\n\n def _create_word(self, pattern, breakpoints):\n word = 0\n for i in range(self.word_length):\n for bp in range(self.alphabet_size):\n if pattern[i] <= breakpoints[bp]:\n word = (word << 2) | bp\n break\n\n return word\n\n def _add_to_bag(self, bag, word, last_word):\n if self.remove_repeat_words and word == last_word:\n return False\n bag[word] = bag.get(word, 0) + 1\n return True\n\n def _generate_breakpoints(self):\n # Pre-made gaussian curve breakpoints from UEA TSC codebase\n return {\n 2: [0, sys.float_info.max],\n 3: [-0.43, 0.43, sys.float_info.max],\n 4: [-0.67, 0, 0.67, sys.float_info.max],\n 5: [-0.84, -0.25, 0.25, 0.84, sys.float_info.max],\n 6: [-0.97, -0.43, 0, 0.43, 0.97, sys.float_info.max],\n 7: [-1.07, -0.57, -0.18, 0.18, 0.57, 1.07, sys.float_info.max],\n 8: [-1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15, sys.float_info.max],\n 9: [-1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22, sys.float_info.max],\n 10: [\n -1.28,\n -0.84,\n -0.52,\n -0.25,\n 0.0,\n 0.25,\n 0.52,\n 0.84,\n 1.28,\n sys.float_info.max,\n ],\n }[self.alphabet_size]\n" ]
[ [ "numpy.asarray", "numpy.arange", "pandas.Series", "pandas.DataFrame" ] ]
orestisfl/arxiv-classifier
[ "df41ad84137b48f77c3a27ee1c84471e22819967" ]
[ "train.py" ]
[ "import json\nimport logging\nimport os\nimport pickle\nimport random\n\nimport pandas as pd\nfrom scipy.special import softmax\nfrom simpletransformers.classification import ClassificationModel\n\nlogging.basicConfig(level=logging.INFO)\ntransformers_logger = logging.getLogger(\"transformers\")\ntransformers_logger.setLevel(logging.WARNING)\n\nMODEL = (\"roberta\", \"roberta-base\")\n# MODEL = (\"mobilebert\", \"google/mobilebert-uncased\")\n# MODEL = (\"distilbert\", \"distilbert-base-uncased\")\n\n\ndef single_line(s):\n return \" \".join(s.split())\n\n\ndef init_data(limit=0):\n # Download from https://www.kaggle.com/Cornell-University/arxiv\n with open(\"arxiv-metadata-oai-snapshot.json\") as f:\n for idx, line in enumerate(f):\n entry = json.loads(line)\n\n cs = se = False\n for cat in entry[\"categories\"].lower().split():\n if cat in (\"cs.se\", \"cs.pl\"):\n cs = se = True\n break\n if cat.startswith(\"cs.\") or cat == \"stat.ml\":\n cs = True\n if not cs:\n continue\n\n yield [\n entry[\"id\"],\n single_line(entry[\"title\"])\n + \" abstract: \"\n + single_line(entry[\"abstract\"]),\n int(se),\n ]\n if limit > 0 and idx >= limit:\n break\n\n\ndef repeat_1(data):\n \"\"\"\n Simple oversampling for the '1' class due to heavy imbalancing\n \"\"\"\n for x in data:\n label = x[-1]\n if label == 1:\n for _ in range(10):\n yield x\n else:\n assert label == 0\n yield x\n\n\ndata = list(init_data())\nrandom.shuffle(data)\nsplit_idx = int(len(data) * 0.5)\nfor idx in range(3):\n if idx == 0:\n train_data = data[:split_idx]\n test_data = data[split_idx:]\n elif idx == 1:\n train_data = data[split_idx:]\n test_data = data[:split_idx]\n else:\n train_data = data\n test_data = None\n train_data = list(repeat_1(train_data))\n random.shuffle(train_data)\n\n train_data = pd.DataFrame(data=train_data, columns=[\"arxivId\", \"text\", \"isSE\"])\n train_data.to_csv(f\"train{idx}.csv\")\n if test_data is not None:\n test_data = pd.DataFrame(data=test_data, columns=[\"arxivId\", \"text\", \"isSE\"])\n test_data.to_csv(f\"test{idx}.csv\")\n\n model = ClassificationModel(\n *MODEL,\n args={\n \"train_batch_size\": 64,\n \"eval_batch_size\": 64,\n \"process_count\": 8,\n \"save_eval_checkpoints\": False,\n \"output_dir\": f\"outputs{idx}/\",\n },\n ) # , num_labels=2, use_cuda=True)\n model.train_model(train_data.drop(\"arxivId\", 1))\n\n if test_data is None:\n continue\n\n result, model_outputs, wrong_predictions = model.eval_model(\n test_data.drop(\"arxivId\", 1)\n )\n print(idx, result)\n with open(f\"result{idx}.pickle\", \"wb\") as f:\n pickle.dump([result, model_outputs, wrong_predictions], f)\n\n # TODO: include year\n # XXX: Sort can include age. E.g. (prob is SE/PL) - (age in years) / 5\n with open(f\"false-positives{idx}.txt\", \"w\") as f:\n for p, x in sorted(\n ((model_outputs[x.guid], x) for x in wrong_predictions if x.label == 0),\n key=lambda x: x[0][0],\n ):\n print(\n r\"https://arxiv.org/abs/\" + test_data[\"arxivId\"][x.guid],\n softmax(p)[1],\n x.text_a,\n file=f,\n )\n" ]
[ [ "scipy.special.softmax", "pandas.DataFrame" ] ]
sun-xiaoyu/allennlp
[ "b49aff6aac4e9912564ee8235250d50c9d17e53f" ]
[ "allennlp/modules/seq2seq_encoders/pytorch_transformer_wrapper.py" ]
[ "from typing import Optional\n\nfrom overrides import overrides\nimport torch\nfrom torch import nn\n\nfrom allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder\nfrom allennlp.nn.util import add_positional_features\n\n\[email protected](\"pytorch_transformer\")\nclass PytorchTransformer(Seq2SeqEncoder):\n \"\"\"\n Implements a stacked self-attention encoder similar to the Transformer\n architecture in [Attention is all you Need]\n (https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).\n\n This class adapts the Transformer from torch.nn for use in AllenNLP. Optionally, it adds positional encodings.\n\n Registered as a `Seq2SeqEncoder` with name \"pytorch_transformer\".\n\n # Parameters\n\n input_dim : `int`, required.\n The input dimension of the encoder.\n feedforward_hidden_dim : `int`, required.\n The middle dimension of the FeedForward network. The input and output\n dimensions are fixed to ensure sizes match up for the self attention layers.\n num_layers : `int`, required.\n The number of stacked self attention -> feedforward -> layer normalisation blocks.\n num_attention_heads : `int`, required.\n The number of attention heads to use per layer.\n use_positional_encoding : `bool`, optional, (default = True)\n Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,\n as without this feature, the self attention layers have no idea of absolute or relative\n position (as they are just computing pairwise similarity between vectors of elements),\n which can be important features for many tasks.\n dropout_prob : `float`, optional, (default = 0.1)\n The dropout probability for the feedforward network.\n \"\"\" # noqa\n\n def __init__(\n self,\n input_dim: int,\n num_layers: int,\n feedforward_hidden_dim: int = 2048,\n num_attention_heads: int = 8,\n positional_encoding: Optional[str] = None,\n positional_embedding_size: int = 512,\n dropout_prob: float = 0.1,\n activation: str = \"relu\",\n ) -> None:\n super().__init__()\n\n layer = nn.TransformerEncoderLayer(\n d_model=input_dim,\n nhead=num_attention_heads,\n dim_feedforward=feedforward_hidden_dim,\n dropout=dropout_prob,\n activation=activation,\n )\n self._transformer = nn.TransformerEncoder(layer, num_layers)\n self._input_dim = input_dim\n\n # initialize parameters\n # We do this before the embeddings are initialized so we get the default initialization for the embeddings.\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n if positional_encoding is None:\n self._sinusoidal_positional_encoding = False\n self._positional_embedding = None\n elif positional_encoding == \"sinusoidal\":\n self._sinusoidal_positional_encoding = True\n self._positional_embedding = None\n elif positional_encoding == \"embedding\":\n self._sinusoidal_positional_encoding = False\n self._positional_embedding = nn.Embedding(positional_embedding_size, input_dim)\n else:\n raise ValueError(\n \"positional_encoding must be one of None, 'sinusoidal', or 'embedding'\"\n )\n\n @overrides\n def get_input_dim(self) -> int:\n return self._input_dim\n\n @overrides\n def get_output_dim(self) -> int:\n return self._input_dim\n\n @overrides\n def is_bidirectional(self):\n return False\n\n @overrides\n def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):\n output = inputs\n if self._sinusoidal_positional_encoding:\n output = add_positional_features(output)\n if self._positional_embedding is not None:\n position_ids = 
torch.arange(inputs.size(1), dtype=torch.long, device=output.device)\n position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])\n output = output + self._positional_embedding(position_ids)\n\n # For some reason the torch transformer expects the shape (sequence, batch, features), not the more\n # familiar (batch, sequence, features), so we have to fix it.\n output = output.permute(1, 0, 2)\n # For some other reason, the torch transformer takes the mask backwards.\n mask = ~mask\n output = self._transformer(output, src_key_padding_mask=mask)\n output = output.permute(1, 0, 2)\n\n return output\n" ]
[ [ "torch.nn.TransformerEncoderLayer", "torch.nn.Embedding", "torch.nn.TransformerEncoder", "torch.nn.init.xavier_uniform_" ] ]
VoxelPi/compm
[ "745019d4e0d156910f19ed9168949f150356a349" ]
[ "ue/ue_05/problem_6.py" ]
[ "import numpy as np\n\ndef pseudo(A):\n # Check if matrix A is injective.\n if np.linalg.det(A.T @ A) < 1e-9:\n print(\"the given matrix is not injective.\")\n return\n\n # Return pseudo-inverse (See lecture notes page 155)\n print(\"the given matrix is injective.\")\n return np.linalg.inv(A.T @ A) @ A.T\n\n# A = np.array([[1, 2, 0], [2, 4, 0], [1, 0, 3]]) # not injective\nA = np.array([[1, 2, 0], [1, 4, 2], [1, 0, 3]]) # injective\nprint(pseudo(A))\n\nA = np.array([[1, 0], [2, 1], [0, 2]])\nb = np.array([1, 0, 1]).reshape((3, 1))\nx = pseudo(A) @ b\nprint(\"\\nOwn implementation:\")\nprint(x)\n\nprint(\"\\nNumpy implementation:\")\nprint(np.linalg.lstsq(a = A, b = b, rcond=None)[0])" ]
[ [ "numpy.linalg.det", "numpy.linalg.lstsq", "numpy.array", "numpy.linalg.inv" ] ]
gizzmo25/pythoncode-tutorials
[ "39a413fc1da232ad6de7e5f1e8955564dc65448e", "39a413fc1da232ad6de7e5f1e8955564dc65448e" ]
[ "machine-learning/image-transformation/cropping.py", "machine-learning/edge-detection/edge_detector.py" ]
[ "import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n# read the input image\r\nimg = cv2.imread(\"city.jpg\")\r\n# convert from BGR to RGB so we can plot using matplotlib\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# disable x & y axis\r\nplt.axis('off')\r\n# show the image\r\nplt.imshow(img)\r\nplt.show()\r\n\r\n# get 200 pixels from 100 to 300 on both x-axis & y-axis\r\n# change that if you will, just make sure you don't exceed cols & rows\r\ncropped_img = img[100:300, 100:300]\r\n# disable x & y axis\r\nplt.axis('off')\r\n# show the resulting image\r\nplt.imshow(cropped_img)\r\nplt.show()\r\n# save the resulting image to disk\r\nplt.imsave(\"city_cropped.jpg\", cropped_img)", "import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\n\r\n# read the image\r\nimage = cv2.imread(sys.argv[1])\r\n\r\n# convert it to grayscale\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n# show the grayscale image, if you want to show, uncomment 2 below lines\r\n# plt.imshow(gray, cmap=\"gray\")\r\n# plt.show()\r\n\r\n# perform the canny edge detector to detect image edges\r\nedges = cv2.Canny(gray, threshold1=30, threshold2=100)\r\n\r\n# show the detected edges\r\nplt.imshow(edges, cmap=\"gray\")\r\nplt.show()" ]
[ [ "matplotlib.pyplot.imsave", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
scottwedge/ev3sim
[ "751c9902e7615d27d52e4b45b34e6acb47c06d24" ]
[ "ev3sim/devices/colour/base.py" ]
[ "import random\nimport numpy as np\n\n\nclass ColourSensorMixin:\n\n RGB_RAW = \"RGB-RAW\"\n\n device_type = \"lego-sensor\"\n mode = RGB_RAW\n\n SENSOR_RADIUS = 1\n SENSOR_POINTS = 100\n\n def _SenseValueAboutPosition(self, centrePosition, valueGetter):\n # Randomly sample value from SENSOR_POINTS chosen around the centrePosition.\n points = [random.random() * self.SENSOR_RADIUS for _ in range(self.SENSOR_POINTS)]\n for x in range(len(points)):\n angle = random.random() * 2 * np.pi\n points[x] = valueGetter(np.array([np.cos(angle) * points[x], np.cos(angle) * points[x]]) + centrePosition)\n # For some reason the color module hangs otherwise :/\n if hasattr(points[x], \"r\"):\n points[x] = np.array([points[x].r, points[x].g, points[x].b])\n total = points[0]\n for x in range(1, len(points)):\n total += points[x]\n return total / len(points)\n\n def _getObjName(self, port):\n return \"sensor\" + port\n\n def applyWrite(self, attribute, value):\n if attribute == \"mode\":\n self.mode = value\n else:\n raise ValueError(f\"Unhandled write! {attribute} {value}\")\n\n def toObject(self):\n res = self.raw()\n data = {\n \"address\": self._interactor.port,\n \"driver_name\": \"lego-ev3-color\",\n \"mode\": self.mode,\n }\n if self.mode == self.RGB_RAW:\n data[\"value0\"], data[\"value1\"], data[\"value2\"] = res\n else:\n raise ValueError(f\"Unhandled mode {self.mode}\")\n return data\n" ]
[ [ "numpy.array", "numpy.cos" ] ]
zbzhzhy/Hyperspectral-Image-Super-resolution-via-Deep-Progressive-Zero-centric-Residual-Learning
[ "39f103a19fd54cc765487389f14f90a23e5e96bf" ]
[ "demo_cave/Hyper_loader_2.py" ]
[ "import numpy as np\r\nimport torch\r\nimport cv2\r\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\r\nfrom torch.autograd import Variable\r\nimport scipy.ndimage as scin\r\nfrom scipy import ndimage\r\nfrom get_name import get_name\r\nimport scipy.io as scio\r\nimport h5py\r\n# import lmdb\r\nimport os\r\nimport random\r\nimport h5py\r\nnew_load = lambda *a,**k: np.load(*a, allow_pickle=True, **k)\r\nclass Hyper_dataset(Dataset):\r\n \"\"\"\r\n \r\n get the Hyperspectral image and corrssponding RGB image \r\n use all data : high resolution HSI, high resolution MSI, low resolution HSI\r\n \"\"\"\r\n def __init__(self, output_shape=512,ratio = 1,Training_mode='Train',data_name = 'CAVE',use_generated_data = False, use_all_data = True):\r\n # self.path = '/home/zhu_19/Hyperspectral_image/Hyperspectral_image_comparing_method/MHF-net-master/CAVEdata/'\r\n self.data_name = data_name\r\n if data_name == 'CAVE':\r\n self.path = '/home/zhu_19/data/hyperspectral/12_31/CAVEdata_1931/'\r\n # file_name = os.walk(self.path+'X/')\r\n # file_name = [i for i in file_name]\r\n # self.file_name = file_name[0][2]\r\n # self.file_name = np.load('/home/grads/zhiyuzhu2/hyperspectral_image/hyperspectral/file_name7048.npy')\r\n name = scio.loadmat('/home/zhu_19/data/instance/file_name.mat')\r\n self.train_name = name['train']\r\n self.test_name = name['test']\r\n self.num_pre_img = 4\r\n self.train_len = 20*16\r\n self.test_len = 12\r\n elif data_name == 'HARVARD':\r\n self.train_path = '/public/SSD/Harvard/train/'\r\n file_name = os.walk(self.train_path)\r\n file_name = [i for i in file_name]\r\n self.train_name = file_name[0][2]\r\n self.test_path = '/public/SSD/Harvard/test/'\r\n file_name = os.walk(self.test_path)\r\n file_name = [i for i in file_name]\r\n self.test_name = file_name[0][2]\r\n self.num_width = int(1040/128)\r\n self.num_hight = int(1390/128)\r\n self.train_len = self.num_hight * self.num_width *30\r\n self.test_len = 20\r\n self.LR_path = '/public/SSD/Harvard/LR/'\r\n # self.file = \r\n # self.\r\n self.reps = scio.loadmat('/home/zhu_19/data/instance/resp.mat')['resp']\r\n self.reps = np.transpose(self.reps,(1,0))\r\n # self.shuffle_index = [2,31,25,6,27,15,19,14,12,28,26,29,8,13,22,7,24,30,10,23,18,17,21,3,9,4,20,5,16,32,11,1]\r\n # save_name = []\r\n # for i in range(32):\r\n # save_name.append(self.file_name[self.shuffle_index[i]-1])\r\n # scio.savemat('save_name7048.mat',{'dict':save_name})\r\n self.TM = Training_mode\r\n def __len__(self):\r\n if self.TM == 'Train':\r\n return self.train_len\r\n elif self.TM == 'Test':\r\n return self.test_len\r\n # def zoom_img(self,input_img,ratio_):\r\n # return np.concatenate([ndimage.zoom(img,zoom = ratio_)[np.newaxis,:,:] for img in input_img],0)\r\n def zoom_img(self,input_img,ratio_):\r\n # return np.concatenate([ndimage.zoom(img,zoom = ratio_)[np.newaxis,:,:] for img in input_img],0)\r\n output_shape = int(input_img.shape[-1]*ratio_)\r\n # print(output_shape,'--------------------------------')\r\n # input_img = cv2.GaussianBlur(input_img,(7,7),2)\r\n # a = int(1/ratio_)\r\n # temp = int(a/2)\r\n # input_img = input_img[:,temp::a,temp::a]\r\n # \r\n return np.concatenate([self.zoom_img_(img,output_shape = output_shape)[np.newaxis,:,:] for img in input_img],0)\r\n def zoom_img_(self,input_img,output_shape):\r\n return input_img.reshape(input_img.shape[0],output_shape,-1).mean(-1).swapaxes(0,1).reshape(output_shape,output_shape,-1).mean(-1).swapaxes(0,1)\r\n def recon_img(self, input_img):\r\n return 
cv2.resize(cv2.resize(input_img.transpose(1,2,0),dsize=(self.shape1,self.shape1)),dsize = (self.output_shape , self.output_shape)).transpose(2,0,1)\r\n def __getitem__(self, index):\r\n if self.data_name == 'CAVE':\r\n # if self.TM == 'Test':\r\n # index = index + self.train_len//(self.num_pre_img**2)\r\n if self.TM=='Train':\r\n # if self.direct_data == True:\r\n index_img = index // self.num_pre_img**2 \r\n # index_img = self.shuffle_index[index_img]-1\r\n index_inside_image = index % self.num_pre_img**2 \r\n index_row = index_inside_image // self.num_pre_img \r\n index_col = index_inside_image % self.num_pre_img\r\n hsi_g = scio.loadmat(self.path+'X/'+str.rstrip(self.train_name[index_img]))\r\n # msi = scio.loadmat(self.path+'Y/'+self.file_name[index_img])\r\n # hsi = scio.loadmat(self.path+'Z/'+self.file_name[index_img])\r\n temp = hsi_g['msi']\r\n temp_a = cv2.GaussianBlur(temp,(7,7),2)[3::8,3::8,:]\r\n hsi_g = hsi_g['msi'][index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n hsi = temp_a[index_row*16:(index_row+1)*16,index_col*16:(index_col+1)*16,:]\r\n # hsi = hsi['Zmsi'][index_row*4:(index_row+1)*4,index_col*4:(index_col+1)*4,:]\r\n # msi = msi['RGB'][index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n msi = np.tensordot(hsi_g,self.reps,(-1,0))\r\n \r\n rotTimes = random.randint(0, 3)\r\n vFlip = random.randint(0, 1)\r\n hFlip = random.randint(0, 1)\r\n \r\n # Random rotation\r\n for j in range(rotTimes):\r\n hsi_g = np.rot90(hsi_g)\r\n hsi = np.rot90(hsi)\r\n msi = np.rot90(msi)\r\n\r\n # Random vertical Flip \r\n for j in range(vFlip):\r\n hsi_g = np.flip(hsi_g,axis=1)\r\n hsi = np.flip(hsi,axis=1)\r\n msi = np.flip(msi,axis=1)\r\n # hsi_g = hsi_g[:,::-1,:]\r\n # hsi = hsi[:,::-1,:]\r\n # msi = msi[:,::-1,:]\r\n \r\n # Random Horizontal Flip\r\n for j in range(hFlip):\r\n hsi_g = np.flip(hsi_g,axis=0)\r\n hsi = np.flip(hsi,axis=0)\r\n msi = np.flip(msi,axis=0)\r\n # hsi_g = hsi_g[::-1,:,:]\r\n # hsi = hsi[::-1,:,:]\r\n # msi = msi[::-1,:,:]\r\n hsi = np.transpose(hsi,(2,0,1)).copy()\r\n msi = np.transpose(msi,(2,0,1)).copy()\r\n hsi_g = np.transpose(hsi_g,(2,0,1)).copy()\r\n # print('shape of tensor {} {} {}'.format(hsi.shape,msi.shape,hsi_g.shape))\r\n elif self.TM=='Test':\r\n hsi_g = scio.loadmat(self.path+'X/'+str.rstrip(self.test_name[index]))\r\n # msi = scio.loadmat(self.path+'Y/'+self.file_name[index])\r\n # hsi = scio.loadmat(self.path+'Z/'+self.file_name[index])\r\n hsi_g = hsi_g['msi']\r\n hsi = cv2.GaussianBlur(hsi_g,(7,7),2)[3::8,3::8,:]\r\n msi = np.tensordot(hsi_g,self.reps,(-1,0))\r\n msi = np.transpose(msi,(2,0,1))\r\n # hsi_g = hsi_g[index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n hsi_g = np.transpose(hsi_g,(2,0,1))\r\n # hsi = hsi[index_row*4:(index_row+1)*4,index_col*4:(index_col+1)*4,:]\r\n hsi = np.transpose(hsi,(2,0,1))\r\n # hsi = np.transpose(hsi['Zmsi'],(2,0,1))\r\n # msi = msi[index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n # msi = np.transpose(msi['RGB'],(2,0,1))\r\n elif self.data_name == 'HARVARD':\r\n index_img = index // (self.num_width*self.num_hight)\r\n # index_img = self.shuffle_index[index_img]-1\r\n index_inside_image = index % (self.num_hight*self.num_width)\r\n index_row = index_inside_image // self.num_hight\r\n index_col = index_inside_image % self.num_hight\r\n file=h5py.File('/public/SSD/Harvard/data.h5','r')\r\n file2=h5py.File('/public/SSD/Harvard/Lr_data.h5','r')\r\n if self.TM=='Train':\r\n hsi_g = file[self.train_name[index_img]][:]\r\n hsi = 
file2[self.train_name[index_img]][:]\r\n # hsi_g = scio.loadmat(self.train_path+self.train_name[index_img])['ref']\r\n # hsi = scio.loadmat(self.LR_path+self.train_name[index_img])['ref']\r\n # msi = scio.loadmat(self.path+'Y/'+self.file_name[index_img])\r\n # temp = hsi_g['ref']\r\n # print('Shape: ------------------ shape of hsi_g:{}'.format(hsi_g['ref'].shape))\r\n # temp_a = cv2.GaussianBlur(temp,(7,7),2)[3::8,3::8,:]\r\n # print('Shape: ------------------ shape of read:{} hsi_g:{} index:row{},index:col{}'.format(temp.shape,hsi_g['ref'].shape,index_row,index_col))\r\n hsi_g = hsi_g[index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n hsi = hsi[index_row*16:(index_row+1)*16,index_col*16:(index_col+1)*16,:]\r\n # print('Shape: +++++++++++++++++++++ shape of read:{} hsi_g:{} index:row{},index:col{}'.format(temp.shape,hsi_g.shape,index_row,index_col))\r\n # hsi = hsi['Zmsi'][index_row*4:(index_row+1)*4,index_col*4:(index_col+1)*4,:]\r\n # msi = msi['RGB'][index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n msi = np.tensordot(hsi_g,self.reps,(-1,0))\r\n rotTimes = random.randint(0, 3)\r\n vFlip = random.randint(0, 1)\r\n hFlip = random.randint(0, 1)\r\n \r\n # Random rotation\r\n for j in range(rotTimes):\r\n hsi_g = np.rot90(hsi_g)\r\n hsi = np.rot90(hsi)\r\n msi = np.rot90(msi)\r\n\r\n # Random vertical Flip \r\n for j in range(vFlip):\r\n hsi_g = np.flip(hsi_g,axis=1)\r\n hsi = np.flip(hsi,axis=1)\r\n msi = np.flip(msi,axis=1)\r\n # hsi_g = hsi_g[:,::-1,:]\r\n # hsi = hsi[:,::-1,:]\r\n # msi = msi[:,::-1,:]\r\n \r\n # Random Horizontal Flip\r\n for j in range(hFlip):\r\n hsi_g = np.flip(hsi_g,axis=0)\r\n hsi = np.flip(hsi,axis=0)\r\n msi = np.flip(msi,axis=0)\r\n # hsi_g = hsi_g[::-1,:,:]\r\n # hsi = hsi[::-1,:,:]\r\n # msi = msi[::-1,:,:]\r\n hsi = np.transpose(hsi,(2,0,1)).copy()\r\n msi = np.transpose(msi,(2,0,1)).copy()\r\n hsi_g = np.transpose(hsi_g,(2,0,1)).copy()\r\n # print('shape of tensor {} {} {}'.format(hsi.shape,msi.shape,hsi_g.shape))\r\n elif self.TM=='Test':\r\n hsi_g = file[self.test_name[index_img]][:]\r\n hsi = file2[self.test_name[index_img]][:]\r\n # hsi_g = scio.loadmat(self.test_path+self.test_name[index])['ref']\r\n # hsi = scio.loadmat(self.LR_path+self.test_name[index_img])['ref']\r\n # msi = scio.loadmat(self.path+'Y/'+self.file_name[index])\r\n # hsi = scio.loadmat(self.path+'Z/'+self.file_name[index])\r\n # hsi_g = hsi_g\r\n # hsi = cv2.GaussianBlur(hsi_g,(7,7),2)[3::8,3::8,:]\r\n msi = np.tensordot(hsi_g,self.reps,(-1,0))\r\n msi = np.transpose(msi,(2,0,1))\r\n # hsi_g = hsi_g[index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n hsi_g = np.transpose(hsi_g,(2,0,1))\r\n # hsi = hsi[index_row*4:(index_row+1)*4,index_col*4:(index_col+1)*4,:]\r\n hsi = np.transpose(hsi,(2,0,1))\r\n # hsi = np.transpose(hsi['Zmsi'],(2,0,1))\r\n # msi = msi[index_row*128:(index_row+1)*128,index_col*128:(index_col+1)*128,:]\r\n # msi = np.transpose(msi['RGB'],(2,0,1))\r\n\r\n\r\n # hsi = self.zoom_img(hsi_g,1/8)\r\n hsi_resize = hsi\r\n # hsi_8 = self.zoom_img(hsi_g, 1/4)\r\n # hsi_2 = self.zoom_img(hsi_g, 1/2)\r\n # msi_8 = self.zoom_img(msi,1/4)\r\n # msi_2 = self.zoom_img(msi,1/2)\r\n return ((hsi,hsi,hsi), hsi_g, hsi_resize, (msi,msi,msi))\r\n " ]
[ [ "numpy.rot90", "scipy.io.loadmat", "numpy.tensordot", "numpy.transpose", "numpy.load", "numpy.flip" ] ]
marcusfilipesr/ross
[ "e00bc10e694ecaf82ee24af82f649da7458fc91d" ]
[ "ross/results.py" ]
[ "\"\"\"ROSS plotting module.\n\nThis module returns graphs for each type of analyses in rotor_assembly.py.\n\"\"\"\nimport copy\nimport inspect\nfrom abc import ABC\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport toml\nfrom plotly import graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom scipy import linalg as la\n\nfrom ross.plotly_theme import tableau_colors\nfrom ross.units import Q_, check_units\nfrom ross.utils import intersection\n\n__all__ = [\n \"CriticalSpeedResults\",\n \"ModalResults\",\n \"CampbellResults\",\n \"FrequencyResponseResults\",\n \"ForcedResponseResults\",\n \"StaticResults\",\n \"SummaryResults\",\n \"ConvergenceResults\",\n \"TimeResponseResults\",\n \"UCSResults\",\n \"Level1Results\",\n]\n\n\nclass Results(ABC):\n \"\"\"Results class.\n\n This class is a general abstract class to be implemented in other classes\n for post-processing results, in order to add saving and loading data options.\n \"\"\"\n\n def save(self, file):\n \"\"\"Save results in a .toml file.\n\n This function will save the simulation results to a .toml file.\n The file will have all the argument's names and values that are needed to\n reinstantiate the class.\n\n Parameters\n ----------\n file : str, pathlib.Path\n The name of the file the results will be saved in.\n\n Examples\n --------\n >>> # Example running a unbalance response\n >>> from tempfile import tempdir\n >>> from pathlib import Path\n >>> import ross as rs\n\n >>> # Running an example\n >>> rotor = rs.rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> response = rotor.run_unbalance_response(node=3,\n ... unbalance_magnitude=0.001,\n ... unbalance_phase=0.0,\n ... frequency=speed)\n\n >>> # create path for a temporary file\n >>> file = Path(tempdir) / 'unb_resp.toml'\n >>> response.save(file)\n \"\"\"\n # get __init__ arguments\n signature = inspect.signature(self.__init__)\n args_list = list(signature.parameters)\n args = {arg: getattr(self, arg) for arg in args_list}\n try:\n data = toml.load(file)\n except FileNotFoundError:\n data = {}\n\n data[f\"{self.__class__.__name__}\"] = args\n with open(file, \"w\") as f:\n toml.dump(data, f, encoder=toml.TomlNumpyEncoder())\n\n if \"rotor\" in args.keys():\n aux_file = str(file)[:-5] + \"_rotor\" + str(file)[-5:]\n args[\"rotor\"].save(aux_file)\n\n @classmethod\n def read_toml_data(cls, data):\n \"\"\"Read and parse data stored in a .toml file.\n\n The data passed to this method needs to be according to the\n format saved in the .toml file by the .save() method.\n\n Parameters\n ----------\n data : dict\n Dictionary obtained from toml.load().\n\n Returns\n -------\n The result object.\n \"\"\"\n return cls(**data)\n\n @classmethod\n def load(cls, file):\n \"\"\"Load results from a .toml file.\n\n This function will load the simulation results from a .toml file.\n The file must have all the argument's names and values that are needed to\n reinstantiate the class.\n\n Parameters\n ----------\n file : str, pathlib.Path\n The name of the file the results will be loaded from.\n\n Examples\n --------\n >>> # Example running a stochastic unbalance response\n >>> from tempfile import tempdir\n >>> from pathlib import Path\n >>> import ross as rs\n >>> # Running an example\n >>> rotor = rs.rotor_example()\n >>> freq_range = np.linspace(0, 500, 31)\n >>> n = 3\n >>> m = 0.01\n >>> p = 0.0\n >>> results = rotor.run_unbalance_response(n, m, p, freq_range)\n >>> # create path for a temporary file\n 
>>> file = Path(tempdir) / 'unb_resp.toml'\n >>> results.save(file)\n >>> # Loading file\n >>> results2 = rs.ForcedResponseResults.load(file)\n >>> abs(results2.forced_resp).all() == abs(results.forced_resp).all()\n True\n \"\"\"\n str_type = [np.dtype(f\"<U4{i}\") for i in range(10)]\n\n data = toml.load(file)\n data = list(data.values())[0]\n for key, value in data.items():\n if key == \"rotor\":\n aux_file = str(file)[:-5] + \"_rotor\" + str(file)[-5:]\n from ross.rotor_assembly import Rotor\n\n data[key] = Rotor.load(aux_file)\n\n elif isinstance(value, Iterable):\n data[key] = np.array(value)\n if data[key].dtype in str_type:\n data[key] = np.array(value).astype(np.complex128)\n\n return cls.read_toml_data(data)\n\n\nclass CriticalSpeedResults(Results):\n \"\"\"Class used to store results from run_critical_speed() method.\n\n Parameters\n ----------\n _wn : array\n Undamped critical speeds array.\n _wd : array\n Undamped critical speeds array.\n log_dec : array\n Logarithmic decrement for each critical speed.\n damping_ratio : array\n Damping ratio for each critical speed.\n whirl_direction : array\n Whirl direction for each critical speed. Can be forward, backward or mixed.\n \"\"\"\n\n def __init__(self, _wn, _wd, log_dec, damping_ratio, whirl_direction):\n self._wn = _wn\n self._wd = _wd\n self.log_dec = log_dec\n self.damping_ratio = damping_ratio\n self.whirl_direction = whirl_direction\n\n def wn(self, frequency_units=\"rad/s\"):\n \"\"\"Convert units for undamped critical speeds.\n\n Parameters\n ----------\n frequency_units : str, optional\n Critical speeds units.\n Default is rad/s\n\n Returns\n -------\n wn : array\n Undamped critical speeds array.\n \"\"\"\n return Q_(self.__dict__[\"_wn\"], \"rad/s\").to(frequency_units).m\n\n def wd(self, frequency_units=\"rad/s\"):\n \"\"\"Convert units for damped critical speeds.\n\n Parameters\n ----------\n frequency_units : str, optional\n Critical speeds units.\n Default is rad/s\n\n Returns\n -------\n wd : array\n Undamped critical speeds array.\n \"\"\"\n return Q_(self.__dict__[\"_wd\"], \"rad/s\").to(frequency_units).m\n\n\nclass ModalResults(Results):\n \"\"\"Class used to store results and provide plots for Modal Analysis.\n\n Two options for plottting are available: plot_mode3D (mode shape 3D view)\n and plot_mode2D (mode shape 2D view). 
The user chooses between them using\n the respective methods.\n\n Parameters\n ----------\n speed : float\n Rotor speed.\n evalues : array\n Eigenvalues array.\n evectors : array\n Eigenvectors array.\n wn : array\n Undamped natural frequencies array.\n wd : array\n Damped natural frequencies array.\n log_dec : array\n Logarithmic decrement for each mode.\n damping_ratio : array\n Damping ratio for each mode.\n ndof : int\n Number of degrees of freedom.\n nodes : list\n List of nodes number.\n nodes_pos : list\n List of nodes positions.\n shaft_elements_length : list\n List with Rotor shaft elements lengths.\n \"\"\"\n\n def __init__(\n self,\n speed,\n evalues,\n evectors,\n wn,\n wd,\n damping_ratio,\n log_dec,\n ndof,\n nodes,\n nodes_pos,\n shaft_elements_length,\n ):\n self.speed = speed\n self.evalues = evalues\n self.evectors = evectors\n self.wn = wn\n self.wd = wd\n self.damping_ratio = damping_ratio\n self.log_dec = log_dec\n self.ndof = ndof\n self.nodes = nodes\n self.nodes_pos = nodes_pos\n self.shaft_elements_length = shaft_elements_length\n self.modes = self.evectors[: self.ndof]\n kappa_modes = []\n for mode in range(len(self.wn)):\n kappa_color = []\n kappa_mode = self.kappa_mode(mode)\n for kappa in kappa_mode:\n kappa_color.append(\"blue\" if kappa > 0 else \"red\")\n kappa_modes.append(kappa_color)\n self.kappa_modes = kappa_modes\n\n @staticmethod\n def whirl(kappa_mode):\n \"\"\"Evaluate the whirl of a mode.\n\n Parameters\n ----------\n kappa_mode : list\n A list with the value of kappa for each node related\n to the mode/natural frequency of interest.\n\n Returns\n -------\n whirldir : str\n A string indicating the direction of precession related to the\n kappa_mode.\n\n Example\n -------\n >>> kappa_mode = [-5.06e-13, -3.09e-13, -2.91e-13, 0.011, -4.03e-13, -2.72e-13, -2.72e-13]\n >>> ModalResults.whirl(kappa_mode)\n 'Forward'\n \"\"\"\n if all(kappa >= -1e-3 for kappa in kappa_mode):\n whirldir = \"Forward\"\n elif all(kappa <= 1e-3 for kappa in kappa_mode):\n whirldir = \"Backward\"\n else:\n whirldir = \"Mixed\"\n return whirldir\n\n @staticmethod\n @np.vectorize\n def whirl_to_cmap(whirl):\n \"\"\"Map the whirl to a value.\n\n Parameters\n ----------\n whirl: string\n A string indicating the whirl direction related to the kappa_mode\n\n Returns\n -------\n An array with reference index for the whirl direction\n\n Example\n -------\n >>> whirl = 'Backward'\n >>> whirl_to_cmap(whirl)\n array(1.)\n \"\"\"\n if whirl == \"Forward\":\n return 0.0\n elif whirl == \"Backward\":\n return 1.0\n elif whirl == \"Mixed\":\n return 0.5\n\n def H_kappa(self, node, w, return_T=False):\n r\"\"\"Calculate the H matrix for a given node and natural frequency.\n\n The matrix H contains information about the whirl direction,\n the orbit minor and major axis and the orbit inclination.\n The matrix is calculated by :math:`H = T.T^T` where the\n matrix T is constructed using the eigenvector corresponding\n to the natural frequency of interest:\n\n .. 
math::\n \n \\begin{eqnarray}\n \\begin{bmatrix}\n u(t)\\\\\n v(t)\n \\end{bmatrix}\n = \\mathfrak{R}\\Bigg(\n \\begin{bmatrix}\n r_u e^{j\\eta_u}\\\\\n r_v e^{j\\eta_v}\n \\end{bmatrix}\\Bigg)\n e^{j\\omega_i t}\n =\n \\begin{bmatrix}\n r_u cos(\\eta_u + \\omega_i t)\\\\\n r_v cos(\\eta_v + \\omega_i t)\n \\end{bmatrix}\n = {\\bf T}\n \\begin{bmatrix}\n cos(\\omega_i t)\\\\\n sin(\\omega_i t)\n \\end{bmatrix}\n \\end{eqnarray}\n\n Where :math:`r_u e^{j\\eta_u}` e :math:`r_v e^{j\\eta_v}` are the\n elements of the *i*\\th eigenvector, corresponding to the node and\n natural frequency of interest (mode).\n\n .. math::\n\n {\\bf T} =\n \\begin{bmatrix}\n r_u cos(\\eta_u) & -r_u sin(\\eta_u)\\\\\n r_u cos(\\eta_u) & -r_v sin(\\eta_v)\n \\end{bmatrix}\n\n Parameters\n ----------\n node: int\n Node for which the matrix H will be calculated.\n w: int\n Index corresponding to the natural frequency\n of interest.\n return_T: bool, optional\n If True, returns the H matrix and a dictionary with the\n values for :math:`r_u, r_v, \\eta_u, \\eta_v`.\n\n Default is false.\n\n Returns\n -------\n H: array\n Matrix H.\n Tdic: dict\n Dictionary with values for :math:`r_u, r_v, \\eta_u, \\eta_v`.\n\n It will be returned only if return_T is True.\n \"\"\"\n # get vector of interest based on freqs\n vector = self.evectors[4 * node : 4 * node + 2, w]\n # get translation sdofs for specified node for each mode\n u = vector[0]\n v = vector[1]\n ru = np.absolute(u)\n rv = np.absolute(v)\n\n nu = np.angle(u)\n nv = np.angle(v)\n # fmt: off\n T = np.array([[ru * np.cos(nu), -ru * np.sin(nu)],\n [rv * np.cos(nv), -rv * np.sin(nv)]])\n # fmt: on\n H = T @ T.T\n\n if return_T:\n Tdic = {\"ru\": ru, \"rv\": rv, \"nu\": nu, \"nv\": nv}\n return H, Tdic\n\n return H\n\n def kappa(self, node, w, wd=True):\n r\"\"\"Calculate kappa for a given node and natural frequency.\n\n frequency is the the index of the natural frequency of interest.\n The function calculates the orbit parameter :math:`\\kappa`:\n\n .. 
math::\n\n \\kappa = \\pm \\sqrt{\\lambda_2 / \\lambda_1}\n\n Where :math:`\\sqrt{\\lambda_1}` is the length of the semiminor axes\n and :math:`\\sqrt{\\lambda_2}` is the length of the semimajor axes.\n\n If :math:`\\kappa = \\pm 1`, the orbit is circular.\n\n If :math:`\\kappa` is positive we have a forward rotating orbit\n and if it is negative we have a backward rotating orbit.\n\n Parameters\n ----------\n node: int\n Node for which kappa will be calculated.\n w: int\n Index corresponding to the natural frequency\n of interest.\n wd: bool\n If True, damping natural frequencies are used.\n\n Default is true.\n\n Returns\n -------\n kappa: dict\n A dictionary with values for the natural frequency,\n major axis, minor axis and kappa.\n \"\"\"\n if wd:\n nat_freq = self.wd[w]\n else:\n nat_freq = self.wn[w]\n\n H, Tvals = self.H_kappa(node, w, return_T=True)\n nu = Tvals[\"nu\"]\n nv = Tvals[\"nv\"]\n\n lam = la.eig(H)[0]\n\n # lam is the eigenvalue -> sqrt(lam) is the minor/major axis.\n # kappa encodes the relation between the axis and the precession.\n minor = np.sqrt(lam.min())\n major = np.sqrt(lam.max())\n kappa = minor / major\n diff = nv - nu\n\n # we need to evaluate if 0 < nv - nu < pi.\n if diff < -np.pi:\n diff += 2 * np.pi\n elif diff > np.pi:\n diff -= 2 * np.pi\n\n # if nv = nu or nv = nu + pi then the response is a straight line.\n if diff == 0 or diff == np.pi:\n kappa = 0\n\n # if 0 < nv - nu < pi, then a backward rotating mode exists.\n elif 0 < diff < np.pi:\n kappa *= -1\n\n k = {\n \"Frequency\": nat_freq,\n \"Minor axes\": np.real(minor),\n \"Major axes\": np.real(major),\n \"kappa\": np.real(kappa),\n }\n\n return k\n\n def kappa_mode(self, w):\n r\"\"\"Evaluate kappa values.\n\n This function evaluates kappa given the index of the natural frequency\n of interest.\n Values of kappa are evaluated for each node of the\n corresponding frequency mode.\n\n Parameters\n ----------\n w: int\n Index corresponding to the natural frequency\n of interest.\n\n Returns\n -------\n kappa_mode: list\n A list with the value of kappa for each node related\n to the mode/natural frequency of interest.\n \"\"\"\n kappa_mode = [self.kappa(node, w)[\"kappa\"] for node in self.nodes]\n return kappa_mode\n\n def whirl_direction(self):\n r\"\"\"Get the whirl direction for each frequency.\n\n Returns\n -------\n whirl_w : array\n An array of strings indicating the direction of precession related\n to the kappa_mode. Backward, Mixed or Forward depending on values\n of kappa_mode.\n \"\"\"\n # whirl direction/values are methods because they are expensive.\n whirl_w = [self.whirl(self.kappa_mode(wd)) for wd in range(len(self.wd))]\n\n return np.array(whirl_w)\n\n def whirl_values(self):\n r\"\"\"Get the whirl value (0., 0.5, or 1.) 
for each frequency.\n\n Returns\n -------\n whirl_to_cmap\n 0.0 - if the whirl is Forward\n 0.5 - if the whirl is Mixed\n 1.0 - if the whirl is Backward\n \"\"\"\n return self.whirl_to_cmap(self.whirl_direction())\n\n def calc_mode_shape(self, mode=None, evec=None):\n r\"\"\"Calculate the arrays describing the mode shapes.\n\n Parameters\n ----------\n mode : int\n The n'th vibration mode\n Default is None\n evec : array\n Array containing the system eigenvectors\n\n Returns\n -------\n xn : array\n absolut nodal displacement - X direction\n yn : array\n absolut nodal displacement - Y direction\n zn : array\n absolut nodal displacement - Z direction\n x_circles : array\n orbit description - X direction\n y_circles : array\n orbit description - Y direction\n z_circles_pos : array\n axial location of each orbit\n nn : int\n number of points to plot lines\n \"\"\"\n if evec is None:\n evec = self.modes[:, mode]\n nodes = self.nodes\n nodes_pos = self.nodes_pos\n shaft_elements_length = self.shaft_elements_length\n\n modex = evec[0::4]\n modey = evec[1::4]\n\n xmax, ixmax = max(abs(modex)), np.argmax(abs(modex))\n ymax, iymax = max(abs(modey)), np.argmax(abs(modey))\n\n if ymax > 0.4 * xmax:\n evec /= modey[iymax]\n else:\n evec /= modex[ixmax]\n\n modex = evec[0::4]\n modey = evec[1::4]\n\n num_points = 201\n c = np.linspace(0, 2 * np.pi, num_points)\n circle = np.exp(1j * c)\n\n x_circles = np.zeros((num_points, len(nodes)))\n y_circles = np.zeros((num_points, len(nodes)))\n z_circles_pos = np.zeros((num_points, len(nodes)))\n\n for node in nodes:\n x = modex[node] * circle\n x_circles[:, node] = np.real(x)\n y = modey[node] * circle\n y_circles[:, node] = np.real(y)\n z_circles_pos[:, node] = nodes_pos[node]\n\n # plot lines\n nn = 21\n zeta = np.linspace(0, 1, nn)\n onn = np.ones_like(zeta)\n\n zeta = zeta.reshape(nn, 1)\n onn = onn.reshape(nn, 1)\n\n xn = np.zeros(nn * (len(nodes) - 1))\n yn = np.zeros(nn * (len(nodes) - 1))\n zn = np.zeros(nn * (len(nodes) - 1))\n\n N1 = onn - 3 * zeta ** 2 + 2 * zeta ** 3\n N2 = zeta - 2 * zeta ** 2 + zeta ** 3\n N3 = 3 * zeta ** 2 - 2 * zeta ** 3\n N4 = -(zeta ** 2) + zeta ** 3\n\n for Le, n in zip(shaft_elements_length, nodes):\n node_pos = nodes_pos[n]\n Nx = np.hstack((N1, Le * N2, N3, Le * N4))\n Ny = np.hstack((N1, -Le * N2, N3, -Le * N4))\n\n xx = [4 * n, 4 * n + 3, 4 * n + 4, 4 * n + 7]\n yy = [4 * n + 1, 4 * n + 2, 4 * n + 5, 4 * n + 6]\n\n pos0 = nn * n\n pos1 = nn * (n + 1)\n\n xn[pos0:pos1] = Nx @ evec[xx].real\n yn[pos0:pos1] = Ny @ evec[yy].real\n zn[pos0:pos1] = (node_pos * onn + Le * zeta).reshape(nn)\n\n return xn, yn, zn, x_circles, y_circles, z_circles_pos, nn\n\n def plot_mode_3d(\n self,\n mode=None,\n evec=None,\n fig=None,\n frequency_type=\"wd\",\n title=None,\n length_units=\"m\",\n frequency_units=\"rad/s\",\n **kwargs,\n ):\n \"\"\"Plot (3D view) the mode shapes.\n\n Parameters\n ----------\n mode : int\n The n'th vibration mode\n Default is None\n evec : array\n Array containing the system eigenvectors\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n frequency_type : str, optional\n \"wd\" calculates de map for the damped natural frequencies.\n \"wn\" calculates de map for the undamped natural frequencies.\n Defaults is \"wd\".\n title : str, optional\n A brief title to the mode shape plot, it will be displayed above other\n relevant data in the plot area. 
It does not modify the figure layout from\n Plotly.\n length_units : str, optional\n length units.\n Default is 'm'.\n frequency_units : str, optional\n Frequency units that will be used in the plot title.\n Default is rad/s.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if fig is None:\n fig = go.Figure()\n\n nodes = self.nodes\n kappa_mode = self.kappa_modes[mode]\n xn, yn, zn, xc, yc, zc_pos, nn = self.calc_mode_shape(mode=mode, evec=evec)\n\n # fmt: off\n frequency = {\n \"wd\": f\"ω<sub>d</sub> = {Q_(self.wd[mode], 'rad/s').to(frequency_units).m:.1f}\",\n \"wn\": f\"ω<sub>n</sub> = {Q_(self.wn[mode], 'rad/s').to(frequency_units).m:.1f}\",\n \"speed\": f\"Speed = {Q_(self.speed, 'rad/s').to(frequency_units).m:.1f}\",\n }\n # fmt: on\n\n for node in nodes:\n fig.add_trace(\n go.Scatter3d(\n x=Q_(zc_pos[10:, node], \"m\").to(length_units).m,\n y=xc[10:, node],\n z=yc[10:, node],\n mode=\"lines\",\n line=dict(color=kappa_mode[node]),\n name=\"node {}\".format(node),\n showlegend=False,\n hovertemplate=(\n \"Nodal Position: %{x:.2f}<br>\"\n + \"X - Relative Displacement: %{y:.2f}<br>\"\n + \"Y - Relative Displacement: %{z:.2f}\"\n ),\n )\n )\n fig.add_trace(\n go.Scatter3d(\n x=Q_([zc_pos[10, node]], \"m\").to(length_units).m,\n y=[xc[10, node]],\n z=[yc[10, node]],\n mode=\"markers\",\n marker=dict(color=kappa_mode[node]),\n name=\"node {}\".format(node),\n showlegend=False,\n )\n )\n\n fig.add_trace(\n go.Scatter3d(\n x=Q_(zn, \"m\").to(length_units).m,\n y=xn,\n z=yn,\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dash\"),\n name=\"mode shape\",\n showlegend=False,\n )\n )\n\n # plot center line\n zn_cl0 = -(zn[-1] * 0.1)\n zn_cl1 = zn[-1] * 1.1\n zn_cl = np.linspace(zn_cl0, zn_cl1, 30)\n fig.add_trace(\n go.Scatter3d(\n x=Q_(zn_cl, \"m\").to(length_units).m,\n y=zn_cl * 0,\n z=zn_cl * 0,\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n if title is None:\n title = \"\"\n\n fig.update_layout(\n scene=dict(\n xaxis=dict(\n title=dict(text=f\"Rotor Length ({length_units})\"),\n autorange=\"reversed\",\n nticks=5,\n ),\n yaxis=dict(\n title=dict(text=\"Relative Displacement\"), range=[-2, 2], nticks=5\n ),\n zaxis=dict(\n title=dict(text=\"Relative Displacement\"), range=[-2, 2], nticks=5\n ),\n ),\n title=dict(\n text=(\n f\"{title}<br>\"\n f\"Mode {mode + 1} | \"\n f\"{frequency['speed']} {frequency_units} | \"\n f\"whirl: {self.whirl_direction()[mode]} | \"\n f\"{frequency[frequency_type]} {frequency_units} | \"\n f\"Log. Dec. 
= {self.log_dec[mode]:.1f} | \"\n f\"Damping ratio = {self.damping_ratio[mode]:.2f}\"\n )\n ),\n **kwargs,\n )\n\n return fig\n\n def plot_mode_2d(\n self,\n mode=None,\n evec=None,\n fig=None,\n frequency_type=\"wd\",\n title=None,\n length_units=\"m\",\n frequency_units=\"rad/s\",\n **kwargs,\n ):\n \"\"\"Plot (2D view) the mode shapes.\n\n Parameters\n ----------\n mode : int\n The n'th vibration mode\n Default is None\n evec : array\n Array containing the system eigenvectors\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n frequency_type : str, optional\n \"wd\" calculates de map for the damped natural frequencies.\n \"wn\" calculates de map for the undamped natural frequencies.\n Defaults is \"wd\".\n title : str, optional\n A brief title to the mode shape plot, it will be displayed above other\n relevant data in the plot area. It does not modify the figure layout from\n Plotly.\n length_units : str, optional\n length units.\n Default is 'm'.\n frequency_units : str, optional\n Frequency units that will be used in the plot title.\n Default is rad/s.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n xn, yn, zn, xc, yc, zc_pos, nn = self.calc_mode_shape(mode=mode, evec=evec)\n nodes_pos = Q_(self.nodes_pos, \"m\").to(length_units).m\n\n theta = np.arctan(xn[0] / yn[0])\n vn = xn * np.sin(theta) + yn * np.cos(theta)\n\n # remove repetitive values from zn and vn\n idx_remove = []\n for i in range(1, len(zn)):\n if zn[i] == zn[i - 1]:\n idx_remove.append(i)\n zn = np.delete(zn, idx_remove)\n vn = np.delete(vn, idx_remove)\n\n if fig is None:\n fig = go.Figure()\n\n colors = dict(Backward=\"red\", Mixed=\"black\", Forward=\"blue\")\n\n # fmt: off\n frequency = {\n \"wd\": f\"ω<sub>d</sub> = {Q_(self.wd[mode], 'rad/s').to(frequency_units).m:.1f}\",\n \"wn\": f\"ω<sub>n</sub> = {Q_(self.wn[mode], 'rad/s').to(frequency_units).m:.1f}\",\n \"speed\": f\"Speed = {Q_(self.speed, 'rad/s').to(frequency_units).m:.1f}\",\n }\n # fmt: on\n whirl_dir = colors[self.whirl_direction()[mode]]\n\n fig.add_trace(\n go.Scatter(\n x=Q_(zn, \"m\").to(length_units).m,\n y=vn / vn[np.argmax(np.abs(vn))],\n mode=\"lines\",\n line=dict(color=whirl_dir),\n name=\"mode shape\",\n showlegend=False,\n )\n )\n # plot center line\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=np.zeros(len(nodes_pos)),\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n name=\"centerline\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n if title is None:\n title = \"\"\n\n fig.update_xaxes(title_text=f\"Rotor Length ({length_units})\")\n fig.update_yaxes(title_text=\"Relative Displacement\")\n fig.update_layout(\n title=dict(\n text=(\n f\"{title}<br>\"\n f\"Mode {mode + 1} | \"\n f\"{frequency['speed']} {frequency_units} | \"\n f\"whirl: {self.whirl_direction()[mode]} | \"\n f\"{frequency[frequency_type]} {frequency_units} | \"\n f\"Log. Dec. 
= {self.log_dec[mode]:.1f} | \"\n f\"Damping ratio = {self.damping_ratio[mode]:.2f}\"\n )\n ),\n **kwargs,\n )\n\n return fig\n\n\nclass CampbellResults(Results):\n \"\"\"Class used to store results and provide plots for Campbell Diagram.\n\n It's possible to visualize multiples harmonics in a single plot to check\n other speeds which also excite a specific natural frequency.\n\n Parameters\n ----------\n speed_range : array\n Array with the speed range in rad/s.\n wd : array\n Array with the damped natural frequencies\n log_dec : array\n Array with the Logarithmic decrement\n whirl_values : array\n Array with the whirl values (0, 0.5 or 1)\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n\n def __init__(self, speed_range, wd, log_dec, damping_ratio, whirl_values):\n self.speed_range = speed_range\n self.wd = wd\n self.log_dec = log_dec\n self.damping_ratio = damping_ratio\n self.whirl_values = whirl_values\n\n @check_units\n def plot(\n self,\n harmonics=[1],\n frequency_units=\"rad/s\",\n damping_parameter=\"log_dec\",\n frequency_range=None,\n damping_range=None,\n fig=None,\n **kwargs,\n ):\n \"\"\"Create Campbell Diagram figure using Plotly.\n\n Parameters\n ----------\n harmonics: list, optional\n List withe the harmonics to be plotted.\n The default is to plot 1x.\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n damping_parameter : str, optional\n Define which value to show for damping. We can use \"log_dec\" or \"damping_ratio\".\n Default is \"log_dec\".\n frequency_range : tuple, pint.Quantity(tuple), optional\n Tuple with (min, max) values for the frequencies that will be plotted.\n Frequencies that are not within the range are filtered out and are not plotted.\n It is possible to use a pint Quantity (e.g. Q_((2000, 1000), \"RPM\")).\n Default is None (no filter).\n damping_range : tuple, optional\n Tuple with (min, max) values for the damping parameter that will be plotted.\n Damping values that are not within the range are filtered out and are not plotted.\n Default is None (no filter).\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n\n Examples\n --------\n >>> import ross as rs\n >>> import numpy as np\n >>> Q_ = rs.Q_\n >>> rotor = rs.rotor_example()\n >>> speed = np.linspace(0, 400, 101)\n >>> camp = rotor.run_campbell(speed)\n >>> fig = camp.plot(\n ... harmonics=[1, 2],\n ... damping_parameter=\"damping_ratio\",\n ... frequency_range=Q_((2000, 10000), \"RPM\"),\n ... damping_range=(-0.1, 100),\n ... frequency_units=\"RPM\",\n ... )\n \"\"\"\n if damping_parameter == \"log_dec\":\n damping_values = self.log_dec\n title_text = \"<b>Log Dec</b>\"\n elif damping_parameter == \"damping_ratio\":\n damping_values = self.damping_ratio\n title_text = \"<b>Damping Ratio</b>\"\n else:\n raise ValueError(\n f\"damping_parameter can be 'log_dec' or 'damping_ratio'. 
{damping_parameter} is not valid\"\n )\n\n wd = self.wd\n num_frequencies = wd.shape[1]\n\n whirl = self.whirl_values\n speed_range = self.speed_range\n\n if fig is None:\n fig = go.Figure()\n\n default_values = dict(\n coloraxis_cmin=0.0,\n coloraxis_cmax=1.0,\n coloraxis_colorscale=\"rdbu\",\n coloraxis_colorbar=dict(title=dict(text=title_text, side=\"right\")),\n )\n for k, v in default_values.items():\n kwargs.setdefault(k, v)\n\n crit_x = []\n crit_y = []\n for i in range(num_frequencies):\n w_i = wd[:, i]\n\n for harm in harmonics:\n x1 = speed_range\n y1 = w_i\n x2 = speed_range\n y2 = harm * speed_range\n\n x, y = intersection(x1, y1, x2, y2)\n crit_x.extend(x)\n crit_y.extend(y)\n\n # filter frequency range\n if frequency_range is not None:\n crit_x_filtered = []\n crit_y_filtered = []\n for x, y in zip(crit_x, crit_y):\n if frequency_range[0] < y < frequency_range[1]:\n crit_x_filtered.append(x)\n crit_y_filtered.append(y)\n crit_x = crit_x_filtered\n crit_y = crit_y_filtered\n\n if len(crit_x) and len(crit_y):\n fig.add_trace(\n go.Scatter(\n x=Q_(crit_x, \"rad/s\").to(frequency_units).m,\n y=Q_(crit_y, \"rad/s\").to(frequency_units).m,\n mode=\"markers\",\n marker=dict(symbol=\"x\", color=\"black\"),\n name=\"Crit. Speed\",\n legendgroup=\"Crit. Speed\",\n showlegend=True,\n hovertemplate=(\n f\"Frequency ({frequency_units}): %{{x:.2f}}<br>Critical Speed ({frequency_units}): %{{y:.2f}}\"\n ),\n )\n )\n\n scatter_marker = [\"triangle-up\", \"circle\", \"triangle-down\"]\n for mark, whirl_dir, legend in zip(\n scatter_marker, [0.0, 0.5, 1.0], [\"Foward\", \"Mixed\", \"Backward\"]\n ):\n for i in range(num_frequencies):\n w_i = wd[:, i]\n whirl_i = whirl[:, i]\n damping_values_i = damping_values[:, i]\n\n whirl_mask = whirl_i == whirl_dir\n mask = whirl_mask\n if frequency_range is not None:\n frequency_mask = (w_i > frequency_range[0]) & (\n w_i < frequency_range[1]\n )\n mask = mask & frequency_mask\n if damping_range is not None:\n damping_mask = (damping_values_i > damping_range[0]) & (\n damping_values_i < damping_range[1]\n )\n mask = mask & damping_mask\n\n if any(check for check in mask):\n fig.add_trace(\n go.Scatter(\n x=Q_(speed_range[mask], \"rad/s\").to(frequency_units).m,\n y=Q_(w_i[mask], \"rad/s\").to(frequency_units).m,\n marker=dict(\n symbol=mark,\n color=damping_values_i[mask],\n coloraxis=\"coloraxis\",\n ),\n mode=\"markers\",\n name=legend,\n legendgroup=legend,\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n else:\n continue\n\n for j, h in enumerate(harmonics):\n fig.add_trace(\n go.Scatter(\n x=Q_(speed_range, \"rad/s\").to(frequency_units).m,\n y=h * Q_(speed_range, \"rad/s\").to(frequency_units).m,\n mode=\"lines\",\n line=dict(dash=\"dashdot\", color=list(tableau_colors)[j]),\n name=\"{}x speed\".format(h),\n hoverinfo=\"none\",\n )\n )\n # turn legend glyphs black\n scatter_marker = [\"triangle-up\", \"circle\", \"triangle-down\"]\n legends = [\"Foward\", \"Mixed\", \"Backward\"]\n for mark, legend in zip(scatter_marker, legends):\n fig.add_trace(\n go.Scatter(\n x=[0],\n y=[0],\n mode=\"markers\",\n name=legend,\n legendgroup=legend,\n marker=dict(symbol=mark, color=\"black\"),\n hoverinfo=\"none\",\n )\n )\n\n fig.update_xaxes(\n title_text=f\"Frequency ({frequency_units})\",\n range=[\n np.min(Q_(speed_range, \"rad/s\").to(frequency_units).m),\n np.max(Q_(speed_range, \"rad/s\").to(frequency_units).m),\n ],\n exponentformat=\"none\",\n )\n fig.update_yaxes(\n title_text=f\"Natural Frequencies ({frequency_units})\",\n range=[0, 1.1 * 
fig.update_yaxes(\n title_text=f\"Natural Frequencies ({frequency_units})\",\n range=[0, 1.1 * np.max(Q_(wd, \"rad/s\").to(frequency_units).m)],\n )\n fig.update_layout(\n legend=dict(\n itemsizing=\"constant\",\n orientation=\"h\",\n xanchor=\"center\",\n x=0.5,\n yanchor=\"bottom\",\n y=-0.3,\n ),\n **kwargs,\n )\n\n return fig\n\n\nclass FrequencyResponseResults(Results):\n \"\"\"Class used to store results and provide plots for Frequency Response.\n\n Parameters\n ----------\n freq_resp : array\n Array with the frequency response (displacement).\n velc_resp : array\n Array with the frequency response (velocity).\n accl_resp : array\n Array with the frequency response (acceleration).\n speed_range : array\n Array with the speed range in rad/s.\n number_dof : int\n Number of degrees of freedom per node.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency plots.\n \"\"\"\n\n def __init__(self, freq_resp, velc_resp, accl_resp, speed_range, number_dof):\n self.freq_resp = freq_resp\n self.velc_resp = velc_resp\n self.accl_resp = accl_resp\n self.speed_range = speed_range\n self.number_dof = number_dof\n\n if self.number_dof == 4:\n self.dof_dict = {\"0\": \"x\", \"1\": \"y\", \"2\": \"α\", \"3\": \"β\"}\n elif self.number_dof == 6:\n self.dof_dict = {\"0\": \"x\", \"1\": \"y\", \"2\": \"z\", \"3\": \"α\", \"4\": \"β\", \"5\": \"θ\"}\n\n def plot_magnitude(\n self,\n inp,\n out,\n frequency_units=\"rad/s\",\n amplitude_units=\"m/N\",\n fig=None,\n **mag_kwargs,\n ):\n \"\"\"Plot frequency response (magnitude) using Plotly.\n\n This method plots the frequency response magnitude given an output and\n an input using Plotly.\n It is possible to plot displacement, velocity and acceleration responses,\n depending on the unit entered in 'amplitude_units'. If '[length]/[force]',\n it displays the displacement; If '[speed]/[force]', it displays the velocity;\n If '[acceleration]/[force]', it displays the acceleration.\n\n Parameters\n ----------\n inp : int\n Input.\n out : int\n Output.\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m/N\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm/N pkpk')\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n mag_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.
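\n\n Examples\n --------\n >>> # a minimal sketch using the bundled example rotor; with 4 dof per\n >>> # node, dof 13 maps to node 3, direction y (values are illustrative)\n >>> import ross as rs\n >>> import numpy as np\n >>> rotor = rs.rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> response = rotor.run_freq_response(speed_range=speed)\n >>> fig = response.plot_magnitude(inp=13, out=13)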
\n \"\"\"\n inpn = inp // self.number_dof\n idof = self.dof_dict[str(inp % self.number_dof)]\n outn = out // self.number_dof\n odof = self.dof_dict[str(out % self.number_dof)]\n\n frequency_range = Q_(self.speed_range, \"rad/s\").to(frequency_units).m\n\n dummy_var = Q_(1, amplitude_units)\n if dummy_var.check(\"[length]/[force]\"):\n mag = np.abs(self.freq_resp)\n mag = Q_(mag, \"m/N\").to(amplitude_units).m\n y_label = \"Displacement\"\n elif dummy_var.check(\"[speed]/[force]\"):\n mag = np.abs(self.velc_resp)\n mag = Q_(mag, \"m/s/N\").to(amplitude_units).m\n y_label = \"Velocity\"\n elif dummy_var.check(\"[acceleration]/[force]\"):\n mag = np.abs(self.accl_resp)\n mag = Q_(mag, \"m/s**2/N\").to(amplitude_units).m\n y_label = \"Acceleration\"\n else:\n raise ValueError(\n \"Not supported unit. Options are '[length]/[force]', '[speed]/[force]', '[acceleration]/[force]'\"\n )\n\n if fig is None:\n fig = go.Figure()\n idx = len(fig.data)\n\n fig.add_trace(\n go.Scatter(\n x=frequency_range,\n y=mag[inp, out, :],\n mode=\"lines\",\n line=dict(color=list(tableau_colors)[idx]),\n name=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n legendgroup=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n showlegend=True,\n hovertemplate=f\"Frequency ({frequency_units}): %{{x:.2f}}<br>Amplitude ({amplitude_units}): %{{y:.2e}}\",\n )\n )\n\n fig.update_xaxes(\n title_text=f\"Frequency ({frequency_units})\",\n range=[np.min(frequency_range), np.max(frequency_range)],\n )\n fig.update_yaxes(title_text=f\"{y_label} ({amplitude_units})\")\n fig.update_layout(**mag_kwargs)\n\n return fig\n\n def plot_phase(\n self,\n inp,\n out,\n frequency_units=\"rad/s\",\n amplitude_units=\"m/N\",\n phase_units=\"rad\",\n fig=None,\n **phase_kwargs,\n ):\n \"\"\"Plot frequency response (phase) using Plotly.\n\n This method plots the frequency response phase given an output and\n an input using Plotly.\n\n Parameters\n ----------\n inp : int\n Input.\n out : int\n Output.\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m/N\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm/N pkpk')\n phase_units : str, optional\n Units for the phase angle (y axis).\n Default is \"rad\"\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n phase_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n inpn = inp // self.number_dof\n idof = self.dof_dict[str(inp % self.number_dof)]\n outn = out // self.number_dof\n odof = self.dof_dict[str(out % self.number_dof)]\n\n frequency_range = Q_(self.speed_range, \"rad/s\").to(frequency_units).m\n\n dummy_var = Q_(1, amplitude_units)\n if dummy_var.check(\"[length]/[force]\"):\n phase = np.angle(self.freq_resp[inp, out, :])\n elif dummy_var.check(\"[speed]/[force]\"):\n phase = np.angle(self.velc_resp[inp, out, :])\n elif dummy_var.check(\"[acceleration]/[force]\"):\n phase = np.angle(self.accl_resp[inp, out, :])\n else:\n raise ValueError(\n \"Not supported unit. Options are '[length]/[force]', '[speed]/[force]', '[acceleration]/[force]'\"\n )\n\n phase = Q_(phase, \"rad\").to(phase_units).m\n\n if phase_units in [\"rad\", \"radian\", \"radians\"]:\n phase = [i + 2 * np.pi if i < 0 else i for i in phase]\n else:\n phase = [i + 360 if i < 0 else i for i in phase]\n\n if fig is None:\n fig = go.Figure()\n idx = len(fig.data)\n\n fig.add_trace(\n go.Scatter(\n x=frequency_range,\n y=phase,\n mode=\"lines\",\n line=dict(color=list(tableau_colors)[idx]),\n name=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n legendgroup=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n showlegend=True,\n hovertemplate=f\"Frequency ({frequency_units}): %{{x:.2f}}<br>Phase: %{{y:.2e}}\",\n )\n )\n\n fig.update_xaxes(\n title_text=f\"Frequency ({frequency_units})\",\n range=[np.min(frequency_range), np.max(frequency_range)],\n )\n fig.update_yaxes(title_text=f\"Phase ({phase_units})\")\n fig.update_layout(**phase_kwargs)\n\n return fig\n\n def plot_polar_bode(\n self,\n inp,\n out,\n frequency_units=\"rad/s\",\n amplitude_units=\"m/N\",\n phase_units=\"rad\",\n fig=None,\n **polar_kwargs,\n ):\n \"\"\"Plot frequency response (polar) using Plotly.\n\n This method plots the frequency response (polar graph) given an output and\n an input using Plotly.\n It is possible to plot displacement, velocity and accelaration responses,\n depending on the unit entered in 'amplitude_units'. If '[length]/[force]',\n it displays the displacement; If '[speed]/[force]', it displays the velocity;\n If '[acceleration]/[force]', it displays the acceleration.\n\n Parameters\n ----------\n inp : int\n Input.\n out : int\n Output.\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m/N\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm/N pkpk')\n phase_units : str, optional\n Units for the x axis.\n Default is \"rad\"\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n polar_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n inpn = inp // self.number_dof\n idof = self.dof_dict[str(inp % self.number_dof)]\n outn = out // self.number_dof\n odof = self.dof_dict[str(out % self.number_dof)]\n\n frequency_range = Q_(self.speed_range, \"rad/s\").to(frequency_units).m\n\n dummy_var = Q_(1, amplitude_units)\n if dummy_var.check(\"[length]/[force]\"):\n mag = np.abs(self.freq_resp[inp, out, :])\n mag = Q_(mag, \"m/N\").to(amplitude_units).m\n phase = np.angle(self.freq_resp[inp, out, :])\n y_label = \"Displacement\"\n elif dummy_var.check(\"[speed]/[force]\"):\n mag = np.abs(self.velc_resp[inp, out, :])\n mag = Q_(mag, \"m/s/N\").to(amplitude_units).m\n phase = np.angle(self.velc_resp[inp, out, :])\n y_label = \"Velocity\"\n elif dummy_var.check(\"[acceleration]/[force]\"):\n mag = np.abs(self.accl_resp[inp, out, :])\n mag = Q_(mag, \"m/s**2/N\").to(amplitude_units).m\n phase = np.angle(self.accl_resp[inp, out, :])\n y_label = \"Acceleration\"\n else:\n raise ValueError(\n \"Not supported unit. Options are '[length]/[force]', '[speed]/[force]', '[acceleration]/[force]'\"\n )\n\n phase = Q_(phase, \"rad\").to(phase_units).m\n\n if phase_units in [\"rad\", \"radian\", \"radians\"]:\n polar_theta_unit = \"radians\"\n phase = [i + 2 * np.pi if i < 0 else i for i in phase]\n else:\n polar_theta_unit = \"degrees\"\n phase = [i + 360 if i < 0 else i for i in phase]\n\n if fig is None:\n fig = go.Figure()\n idx = len(fig.data)\n\n fig.add_trace(\n go.Scatterpolar(\n r=mag,\n theta=phase,\n customdata=frequency_range,\n thetaunit=polar_theta_unit,\n mode=\"lines+markers\",\n marker=dict(color=list(tableau_colors)[idx]),\n line=dict(color=list(tableau_colors)[idx]),\n name=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n legendgroup=f\"inp: node {inpn} | dof: {idof}<br>out: node {outn} | dof: {odof}\",\n showlegend=True,\n hovertemplate=f\"Amplitude ({amplitude_units}): %{{r:.2e}}<br>Phase: %{{theta:.2f}}<br>Frequency ({frequency_units}): %{{customdata:.2f}}\",\n )\n )\n\n fig.update_layout(\n polar=dict(\n radialaxis=dict(\n title=dict(text=f\"{y_label} ({amplitude_units})\"),\n exponentformat=\"power\",\n ),\n angularaxis=dict(thetaunit=polar_theta_unit),\n ),\n **polar_kwargs,\n )\n\n return fig\n\n def plot(\n self,\n inp,\n out,\n frequency_units=\"rad/s\",\n amplitude_units=\"m/N\",\n phase_units=\"rad\",\n fig=None,\n mag_kwargs=None,\n phase_kwargs=None,\n polar_kwargs=None,\n fig_kwargs=None,\n ):\n \"\"\"Plot frequency response.\n\n This method plots the frequency response given an output and an input\n using Plotly.\n\n This method returns a subplot with:\n - Frequency vs Amplitude;\n - Frequency vs Phase Angle;\n - Polar plot Amplitude vs Phase Angle;\n\n Amplitude can be displacement, velocity or accelaration responses,\n depending on the unit entered in 'amplitude_units'. 
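For instance, passing amplitude_units='mm/N' selects the displacement\n response expressed in millimetres per newton (an illustrative choice). 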
If '[length]/[force]',\n it displays the displacement; If '[speed]/[force]', it displays the velocity;\n If '[acceleration]/[force]', it displays the acceleration.\n\n Parameters\n ----------\n inp : int\n Input.\n out : int\n Output.\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m/N\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm/N pkpk')\n phase_units : str, optional\n Units for the x axis.\n Default is \"rad\"\n mag_kwargs : optional\n Additional key word arguments can be passed to change the magnitude plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n phase_kwargs : optional\n Additional key word arguments can be passed to change the phase plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n polar_kwargs : optional\n Additional key word arguments can be passed to change the polar plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n fig_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...). This kwargs override \"mag_kwargs\",\n \"phase_kwargs\" and \"polar_kwargs\" dictionaries.\n *See Plotly Python make_subplots Reference for more information.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency and\n polar Amplitude vs Phase plots.\n \"\"\"\n mag_kwargs = {} if mag_kwargs is None else copy.copy(mag_kwargs)\n phase_kwargs = {} if phase_kwargs is None else copy.copy(phase_kwargs)\n polar_kwargs = {} if polar_kwargs is None else copy.copy(polar_kwargs)\n fig_kwargs = {} if fig_kwargs is None else copy.copy(fig_kwargs)\n\n fig0 = self.plot_magnitude(\n inp, out, frequency_units, amplitude_units, None, **mag_kwargs\n )\n fig1 = self.plot_phase(\n inp,\n out,\n frequency_units,\n amplitude_units,\n phase_units,\n None,\n **phase_kwargs,\n )\n fig2 = self.plot_polar_bode(\n inp,\n out,\n frequency_units,\n amplitude_units,\n phase_units,\n None,\n **polar_kwargs,\n )\n\n if fig is None:\n fig = make_subplots(\n rows=2,\n cols=2,\n specs=[[{}, {\"type\": \"polar\", \"rowspan\": 2}], [{}, None]],\n )\n\n for data in fig0[\"data\"]:\n fig.add_trace(data, row=1, col=1)\n for data in fig1[\"data\"]:\n data.showlegend = False\n fig.add_trace(data, row=2, col=1)\n for data in fig2[\"data\"]:\n data.showlegend = False\n fig.add_trace(data, row=1, col=2)\n\n fig.update_xaxes(fig0.layout.xaxis, row=1, col=1)\n fig.update_yaxes(fig0.layout.yaxis, row=1, col=1)\n fig.update_xaxes(fig1.layout.xaxis, row=2, col=1)\n fig.update_yaxes(fig1.layout.yaxis, row=2, col=1)\n fig.update_layout(\n polar=dict(\n radialaxis=fig2.layout.polar.radialaxis,\n angularaxis=fig2.layout.polar.angularaxis,\n ),\n **fig_kwargs,\n )\n\n return fig\n\n\nclass ForcedResponseResults(Results):\n \"\"\"Class used to store results and provide plots for Forced Response analysis.\n\n Parameters\n ----------\n rotor : ross.Rotor object\n The Rotor object\n force_resp : array\n Array with the forced response (displacement) for each node for each frequency.\n velc_resp : array\n Array 
with the forced response (velocity) for each node for each frequency.\n accl_resp : array\n Array with the forced response (acceleration) for each node for each frequency.\n speed_range : array\n Array with the frequencies.\n unbalance : array, optional\n Array with the unbalance data (node, magnitude and phase) to be plotted\n with deflected shape. This argument is set only if running an unbalance\n response analysis.\n Default is None.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency plots.\n \"\"\"\n\n def __init__(\n self, rotor, forced_resp, velc_resp, accl_resp, speed_range, unbalance=None\n ):\n self.rotor = rotor\n self.forced_resp = forced_resp\n self.velc_resp = velc_resp\n self.accl_resp = accl_resp\n self.speed_range = speed_range\n self.unbalance = unbalance\n\n self.default_units = {\n \"[length]\": [\"m\", \"forced_resp\"],\n \"[length] / [time]\": [\"m/s\", \"velc_resp\"],\n \"[length] / [time] ** 2\": [\"m/s**2\", \"accl_resp\"],\n }\n\n def data_magnitude(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n ):\n \"\"\"Return the forced response (magnitude) in DataFrame format.\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be added as a DataFrame column title.\n probe_units : str, optional\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Units for the frequency range.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n\n Returns\n -------\n df : pd.DataFrame\n DataFrame storing magnitude data arrays. The columns are set based on the\n probe's tag.\n \"\"\"\n frequency_range = Q_(self.speed_range, \"rad/s\").to(frequency_units).m\n\n unit_type = str(Q_(1, amplitude_units).dimensionality)\n try:\n base_unit = self.default_units[unit_type][0]\n except KeyError:\n raise ValueError(\n \"Not supported unit. Dimensionality options are '[length]', '[speed]', '[acceleration]'\"\n )
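\n\n # one magnitude column per probe, keyed by the probe tag (an\n # auto-generated label is used when no tag is given)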
\n data = {}\n data[\"frequency\"] = frequency_range\n\n for i, p in enumerate(probe):\n angle = Q_(p[1], probe_units).to(\"rad\").m\n vector = self._calculate_major_axis_per_node(\n node=p[0], angle=angle, amplitude_units=amplitude_units\n )[3]\n try:\n probe_tag = p[2]\n except IndexError:\n probe_tag = f\"Probe {i+1} - Node {p[0]}\"\n\n data[probe_tag] = Q_(np.abs(vector), base_unit).to(amplitude_units).m\n\n df = pd.DataFrame(data)\n\n return df\n\n def data_phase(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n phase_units=\"rad\",\n ):\n \"\"\"Return the forced response (phase) in DataFrame format.\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be added as a DataFrame column title.\n probe_units : str, optional\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Units for the frequency range.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n phase_units : str, optional\n Units for the phase values.\n Default is \"rad\"\n\n Returns\n -------\n df : pd.DataFrame\n DataFrame storing phase data arrays. The columns are set based on the\n probe's tag.\n \"\"\"
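\n # negative phase angles are wrapped into [0, 2*pi) before unit conversion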
\n frequency_range = Q_(self.speed_range, \"rad/s\").to(frequency_units).m\n\n data = {}\n data[\"frequency\"] = frequency_range\n\n for i, p in enumerate(probe):\n angle = Q_(p[1], probe_units).to(\"rad\").m\n vector = self._calculate_major_axis_per_node(\n node=p[0], angle=angle, amplitude_units=amplitude_units\n )[4]\n\n probe_phase = np.real(vector)\n probe_phase = np.where(probe_phase < 0, probe_phase + 2 * np.pi, probe_phase)\n probe_phase = Q_(probe_phase, \"rad\").to(phase_units).m\n\n try:\n probe_tag = p[2]\n except IndexError:\n probe_tag = f\"Probe {i+1} - Node {p[0]}\"\n\n data[probe_tag] = probe_phase\n\n df = pd.DataFrame(data)\n\n return df\n\n def plot_magnitude(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot forced response (magnitude) using Plotly.\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be added as a DataFrame column title.\n probe_units : str, optional\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n df = self.data_magnitude(probe, probe_units, frequency_units, amplitude_units)\n\n if fig is None:\n fig = go.Figure()\n\n for i, column in enumerate(df.columns[1:]):\n fig.add_trace(\n go.Scatter(\n x=df[\"frequency\"],\n y=df[column],\n mode=\"lines\",\n line=dict(color=list(tableau_colors)[i]),\n name=column,\n legendgroup=column,\n showlegend=True,\n hovertemplate=f\"Frequency ({frequency_units}): %{{x:.2f}}<br>Amplitude ({amplitude_units}): %{{y:.2e}}\",\n )\n )\n\n fig.update_xaxes(\n title_text=f\"Frequency ({frequency_units})\",\n range=[np.min(df[\"frequency\"]), np.max(df[\"frequency\"])],\n )\n fig.update_yaxes(\n title_text=f\"Amplitude ({amplitude_units})\", exponentformat=\"power\"\n )\n fig.update_layout(**kwargs)\n\n return fig\n\n def plot_phase(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n phase_units=\"rad\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot forced response (phase) using Plotly.\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be add a DataFrame column title.\n probe_units : str, option\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n phase_units : str, optional\n Units for the x axis.\n Default is \"rad\"\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n df = self.data_phase(\n probe, probe_units, frequency_units, amplitude_units, phase_units\n )\n\n if fig is None:\n fig = go.Figure()\n\n for i, column in enumerate(df.columns[1:]):\n fig.add_trace(\n go.Scatter(\n x=df[\"frequency\"],\n y=df[column],\n mode=\"lines\",\n line=dict(color=list(tableau_colors)[i]),\n name=column,\n legendgroup=column,\n showlegend=True,\n hovertemplate=f\"Frequency ({frequency_units}): %{{x:.2f}}<br>Phase ({phase_units}): %{{y:.2e}}\",\n )\n )\n\n fig.update_xaxes(\n title_text=f\"Frequency ({frequency_units})\",\n range=[np.min(df[\"frequency\"]), np.max(df[\"frequency\"])],\n )\n fig.update_yaxes(title_text=f\"Phase ({phase_units})\")\n fig.update_layout(**kwargs)\n\n return fig\n\n def plot_polar_bode(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n phase_units=\"rad\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot polar forced response using Plotly.\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be add a DataFrame column title.\n probe_units : str, option\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Units for the x axis.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n phase_units : str, optional\n Units for the x axis.\n Default is \"rad\"\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n df_m = self.data_magnitude(probe, probe_units, frequency_units, amplitude_units)\n df_p = self.data_phase(\n probe, probe_units, frequency_units, amplitude_units, phase_units\n )\n\n if fig is None:\n fig = go.Figure()\n\n if phase_units in [\"rad\", \"radian\", \"radians\"]:\n polar_theta_unit = \"radians\"\n elif phase_units in [\"degree\", \"degrees\", \"deg\"]:\n polar_theta_unit = \"degrees\"\n\n for i, column in enumerate(df_m.columns[1:]):\n fig.add_trace(\n go.Scatterpolar(\n r=df_m[column],\n theta=df_p[column],\n customdata=df_m[\"frequency\"],\n thetaunit=polar_theta_unit,\n mode=\"lines+markers\",\n marker=dict(color=list(tableau_colors)[i]),\n line=dict(color=list(tableau_colors)[i]),\n name=column,\n legendgroup=column,\n showlegend=True,\n hovertemplate=f\"Amplitude ({amplitude_units}): %{{r:.2e}}<br>Phase: %{{theta:.2f}}<br>Frequency ({frequency_units}): %{{customdata:.2f}}\",\n )\n )\n\n fig.update_layout(\n polar=dict(\n radialaxis=dict(\n title=dict(text=f\"Amplitude ({amplitude_units})\"),\n exponentformat=\"power\",\n ),\n angularaxis=dict(thetaunit=polar_theta_unit),\n ),\n **kwargs,\n )\n\n return fig\n\n def plot(\n self,\n probe,\n probe_units=\"rad\",\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n phase_units=\"rad\",\n mag_kwargs=None,\n phase_kwargs=None,\n polar_kwargs=None,\n subplot_kwargs=None,\n ):\n \"\"\"Plot forced response.\n\n This method returns a subplot with:\n - Frequency vs Amplitude;\n - Frequency vs Phase Angle;\n - Polar plot Amplitude vs Phase Angle;\n\n Parameters\n ----------\n probe : list\n List with tuples (node, orientation angle, tag).\n\n node : int -> Indicate the node where the probe is located.\n\n orientation : float -> Probe orientation angle about the shaft.\n The 0 refers to +X direction.\n The strings 'major' and 'minor' can also be used to reference the major\n and minor axis.\n\n tag : str, optional -> Probe tag to be add a DataFrame column title.\n probe_units : str, option\n Units for probe orientation.\n Default is \"rad\".\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n phase_units : str, optional\n Phase units.\n Default is \"rad\"\n mag_kwargs : optional\n Additional key word arguments can be passed to change the magnitude plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n phase_kwargs : optional\n Additional key word arguments can be passed to change the phase plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n polar_kwargs : optional\n Additional key word arguments can be passed to change the polar plot\n layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n subplot_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...). 
This kwargs override \"mag_kwargs\" and\n \"phase_kwargs\" dictionaries.\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency and\n polar Amplitude vs Phase plots.\n \"\"\"\n mag_kwargs = {} if mag_kwargs is None else copy.copy(mag_kwargs)\n phase_kwargs = {} if phase_kwargs is None else copy.copy(phase_kwargs)\n polar_kwargs = {} if polar_kwargs is None else copy.copy(polar_kwargs)\n subplot_kwargs = {} if subplot_kwargs is None else copy.copy(subplot_kwargs)\n\n # fmt: off\n fig0 = self.plot_magnitude(\n probe, probe_units, frequency_units, amplitude_units, **mag_kwargs\n )\n fig1 = self.plot_phase(\n probe, probe_units, frequency_units, amplitude_units, phase_units, **phase_kwargs\n )\n fig2 = self.plot_polar_bode(\n probe, probe_units, frequency_units, amplitude_units, phase_units, **polar_kwargs\n )\n # fmt: on\n\n subplots = make_subplots(\n rows=2,\n cols=2,\n specs=[[{}, {\"type\": \"polar\", \"rowspan\": 2}], [{}, None]],\n shared_xaxes=True,\n vertical_spacing=0.02,\n )\n for data in fig0[\"data\"]:\n data.showlegend = False\n subplots.add_trace(data, row=1, col=1)\n for data in fig1[\"data\"]:\n data.showlegend = False\n subplots.add_trace(data, row=2, col=1)\n for data in fig2[\"data\"]:\n subplots.add_trace(data, row=1, col=2)\n\n subplots.update_yaxes(fig0.layout.yaxis, row=1, col=1)\n subplots.update_xaxes(fig1.layout.xaxis, row=2, col=1)\n subplots.update_yaxes(fig1.layout.yaxis, row=2, col=1)\n subplots.update_layout(\n polar=dict(\n radialaxis=fig2.layout.polar.radialaxis,\n angularaxis=fig2.layout.polar.angularaxis,\n ),\n **subplot_kwargs,\n )\n\n return subplots\n\n def _calculate_major_axis_per_node(self, node, angle, amplitude_units=\"m\"):\n \"\"\"Calculate the major axis for a node for each frequency.\n\n Parameters\n ----------\n node : float\n A node from the rotor model.\n angle : float, str\n The orientation angle of the axis.\n Options are:\n\n float : angle in rad capture the response in a probe orientation;\n\n str : \"major\" to capture the response for the major axis;\n\n str : \"minor\" to capture the response for the minor axis.\n\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n\n Returns\n -------\n major_axis_vector : np.ndarray\n major_axis_vector[0, :] = foward vector\n major_axis_vector[1, :] = backward vector\n major_axis_vector[2, :] = axis angle\n major_axis_vector[3, :] = axis vector response for the input angle\n major_axis_vector[4, :] = phase response for the input angle\n \"\"\"\n ndof = self.rotor.number_dof\n nodes = self.rotor.nodes\n link_nodes = self.rotor.link_nodes\n\n unit_type = str(Q_(1, amplitude_units).dimensionality)\n try:\n response = self.__dict__[self.default_units[unit_type][1]]\n except KeyError:\n raise ValueError(\n \"Not supported unit. 
Dimensionality options are '[length]', '[speed]', '[acceleration]'\"\n )\n\n major_axis_vector = np.zeros((5, len(self.speed_range)), dtype=complex)\n\n fix_dof = (node - nodes[-1] - 1) * ndof // 2 if node in link_nodes else 0\n dofx = ndof * node - fix_dof\n dofy = ndof * node + 1 - fix_dof\n\n # Relative angle between probes (90°)\n Rel_ang = np.exp(1j * np.pi / 2)\n\n for i, f in enumerate(self.speed_range):\n\n # Foward and Backward vectors\n fow = response[dofx, i] / 2 + Rel_ang * response[dofy, i] / 2\n back = (\n np.conj(response[dofx, i]) / 2\n + Rel_ang * np.conj(response[dofy, i]) / 2\n )\n\n ang_fow = np.angle(fow)\n if ang_fow < 0:\n ang_fow += 2 * np.pi\n\n ang_back = np.angle(back)\n if ang_back < 0:\n ang_back += 2 * np.pi\n\n if angle == \"major\":\n # Major axis angle\n axis_angle = (ang_back - ang_fow) / 2\n if axis_angle > np.pi:\n axis_angle -= np.pi\n\n elif angle == \"minor\":\n # Minor axis angle\n axis_angle = (ang_back - ang_fow + np.pi) / 2\n if axis_angle > np.pi:\n axis_angle -= np.pi\n\n else:\n axis_angle = angle\n\n major_axis_vector[0, i] = fow\n major_axis_vector[1, i] = back\n major_axis_vector[2, i] = axis_angle\n major_axis_vector[3, i] = np.abs(\n fow * np.exp(1j * axis_angle) + back * np.exp(-1j * axis_angle)\n )\n major_axis_vector[4, i] = np.angle(\n fow * np.exp(1j * axis_angle) + back * np.exp(-1j * axis_angle)\n )\n\n return major_axis_vector\n\n def _calculate_major_axis_per_speed(self, speed, amplitude_units=\"m\"):\n \"\"\"Calculate the major axis for each nodal orbit.\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. Must be an element from the speed_range argument\n passed to the class (rad/s).\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n\n Returns\n -------\n major_axis_vector : np.ndarray\n major_axis_vector[0, :] = foward vector\n major_axis_vector[1, :] = backward vector\n major_axis_vector[2, :] = major axis angle\n major_axis_vector[3, :] = major axis vector for the maximum major axis angle\n major_axis_vector[4, :] = absolute values for major axes vectors\n \"\"\"\n nodes = self.rotor.nodes\n ndof = self.rotor.number_dof\n\n major_axis_vector = np.zeros((5, len(nodes)), dtype=complex)\n idx = np.where(np.isclose(self.speed_range, speed, atol=1e-6))[0][0]\n\n unit_type = str(Q_(1, amplitude_units).dimensionality)\n try:\n response = self.__dict__[self.default_units[unit_type][1]]\n except KeyError:\n raise ValueError(\n \"Not supported unit. 
Dimensionality options are '[length]', '[speed]', '[acceleration]'\"\n )\n\n for i, n in enumerate(nodes):\n dofx = ndof * n\n dofy = ndof * n + 1\n\n # Relative angle between probes (90°)\n Rel_ang = np.exp(1j * np.pi / 2)\n\n # Foward and Backward vectors\n fow = response[dofx, idx] / 2 + Rel_ang * response[dofy, idx] / 2\n back = (\n np.conj(response[dofx, idx]) / 2\n + Rel_ang * np.conj(response[dofy, idx]) / 2\n )\n\n ang_fow = np.angle(fow)\n if ang_fow < 0:\n ang_fow += 2 * np.pi\n\n ang_back = np.angle(back)\n if ang_back < 0:\n ang_back += 2 * np.pi\n\n # Major axis angles\n ang_maj_ax = (ang_back - ang_fow) / 2\n\n # Adjusting points to the same quadrant\n if ang_maj_ax > np.pi:\n ang_maj_ax -= np.pi\n\n major_axis_vector[0, i] = fow\n major_axis_vector[1, i] = back\n major_axis_vector[2, i] = ang_maj_ax\n\n max_major_axis_angle = np.max(major_axis_vector[2])\n\n # fmt: off\n major_axis_vector[3] = (\n major_axis_vector[0] * np.exp(1j * max_major_axis_angle) +\n major_axis_vector[1] * np.exp(-1j * max_major_axis_angle)\n )\n major_axis_vector[4] = np.abs(\n major_axis_vector[0] * np.exp(1j * major_axis_vector[2]) +\n major_axis_vector[1] * np.exp(-1j * major_axis_vector[2])\n )\n # fmt: on\n\n return major_axis_vector\n\n def _calculate_bending_moment(self, speed):\n \"\"\"Calculate the bending moment in X and Y directions.\n\n This method calculate forces and moments on nodal positions for a deflected\n shape configuration.\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. Must be an element from the speed_range argument\n passed to the class (rad/s).\n\n Returns\n -------\n Mx : array\n Bending Moment on X directon.\n My : array\n Bending Moment on Y directon.\n \"\"\"\n idx = np.where(np.isclose(self.speed_range, speed, atol=1e-6))[0][0]\n mag = np.abs(self.forced_resp[:, idx])\n phase = np.angle(self.forced_resp[:, idx])\n number_dof = self.rotor.number_dof\n nodes = self.rotor.nodes\n\n Mx = np.zeros_like(nodes, dtype=np.float64)\n My = np.zeros_like(nodes, dtype=np.float64)\n mag = mag * np.cos(-phase)\n\n # fmt: off\n for i, el in enumerate(self.rotor.shaft_elements):\n x = (-el.material.E * el.Ie / el.L ** 2) * np.array([\n [-6, +6, -4 * el.L, -2 * el.L],\n [+6, -6, +2 * el.L, +4 * el.L],\n ])\n response_x = np.array([\n [mag[number_dof * el.n_l + 0]],\n [mag[number_dof * el.n_r + 0]],\n [mag[number_dof * el.n_l + 3]],\n [mag[number_dof * el.n_r + 3]],\n ])\n\n Mx[[el.n_l, el.n_r]] += (x @ response_x).flatten()\n\n y = (-el.material.E * el.Ie / el.L ** 2) * np.array([\n [-6, +6, +4 * el.L, +2 * el.L],\n [+6, -6, -2 * el.L, -4 * el.L],\n ])\n response_y = np.array([\n [mag[number_dof * el.n_l + 1]],\n [mag[number_dof * el.n_r + 1]],\n [mag[number_dof * el.n_l + 2]],\n [mag[number_dof * el.n_r + 2]],\n ])\n My[[el.n_l, el.n_r]] += (y @ response_y).flatten()\n # fmt: on\n\n return Mx, My\n\n def plot_deflected_shape_2d(\n self,\n speed,\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n rotor_length_units=\"m\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot the 2D deflected shape diagram.\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. 
Must be an element from the speed_range argument\n passed to the class (rad/s).\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n rotor_length_units : str, optional\n Displacement units.\n Default is 'm'.\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the deflected shape\n plot layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if not any(np.isclose(self.speed_range, speed, atol=1e-6)):\n raise ValueError(\"No data available for this speed value.\")\n\n unit_type = str(Q_(1, amplitude_units).dimensionality)\n try:\n base_unit = self.default_units[unit_type][0]\n except KeyError:\n raise ValueError(\n \"Not supported unit. Dimensionality options are '[length]', '[speed]', '[acceleration]'\"\n )\n\n nodes_pos = Q_(self.rotor.nodes_pos, \"m\").to(rotor_length_units).m\n maj_vect = self._calculate_major_axis_per_speed(speed, amplitude_units)\n maj_vect = Q_(maj_vect[4].real, base_unit).to(amplitude_units).m\n\n if fig is None:\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=maj_vect,\n mode=\"lines\",\n name=\"Major Axis\",\n legendgroup=\"Major_Axis_2d\",\n showlegend=False,\n hovertemplate=f\"Nodal Position ({rotor_length_units}): %{{x:.2f}}<br>Amplitude ({amplitude_units}): %{{y:.2e}}\",\n )\n )\n # plot center line\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=np.zeros(len(nodes_pos)),\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n fig.update_xaxes(title_text=f\"Rotor Length ({rotor_length_units})\")\n fig.update_yaxes(\n title_text=f\"Major Axis Abs Amplitude ({amplitude_units})\",\n title_font=dict(size=12),\n )\n fig.update_layout(**kwargs)\n\n return fig\n\n def plot_deflected_shape_3d(\n self,\n speed,\n samples=101,\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n rotor_length_units=\"m\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot the 3D deflected shape diagram.\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. Must be an element from the speed_range argument\n passed to the class (rad/s).\n samples : int, optional\n Number of samples to generate the orbit for each node.\n Default is 101.\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm pkpk')\n rotor_length_units : str, optional\n Rotor Length units.\n Default is 'm'.\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the deflected shape\n plot layout only (e.g. 
width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if not any(np.isclose(self.speed_range, speed, atol=1e-6)):\n raise ValueError(\"No data available for this speed value.\")\n\n unit_type = str(Q_(1, amplitude_units).dimensionality)\n try:\n base_unit = self.default_units[unit_type][0]\n except KeyError:\n raise ValueError(\n \"Not supported unit. Dimensionality options are '[length]', '[speed]', '[acceleration]'\"\n )\n\n mag = np.abs(self.__dict__[self.default_units[unit_type][1]])\n phase = np.angle(self.__dict__[self.default_units[unit_type][1]])\n ub = self.unbalance\n nodes = self.rotor.nodes\n nodes_pos = Q_(self.rotor.nodes_pos, \"m\").to(rotor_length_units).m\n number_dof = self.rotor.number_dof\n idx = np.where(np.isclose(self.speed_range, speed, atol=1e-6))[0][0]\n\n # orbit of a single revolution\n t = np.linspace(0, 2 * np.pi / speed, samples)\n x_pos = np.repeat(nodes_pos, t.size).reshape(len(nodes_pos), t.size)\n\n if fig is None:\n fig = go.Figure()\n\n for i, n in enumerate(nodes):\n dofx = number_dof * n\n dofy = number_dof * n + 1\n\n y = mag[dofx, idx] * np.cos(speed * t - phase[dofx, idx])\n z = mag[dofy, idx] * np.cos(speed * t - phase[dofy, idx])\n\n # plot nodal orbit\n fig.add_trace(\n go.Scatter3d(\n x=x_pos[n],\n y=Q_(y, base_unit).to(amplitude_units).m,\n z=Q_(z, base_unit).to(amplitude_units).m,\n mode=\"lines\",\n line=dict(color=\"royalblue\"),\n name=\"Orbit\",\n legendgroup=\"Orbit\",\n showlegend=False,\n hovertemplate=(\n f\"Position ({rotor_length_units}): %{{x:.2f}}<br>X - Amplitude ({amplitude_units}): %{{y:.2e}}<br>Y - Amplitude ({amplitude_units}): %{{z:.2e}}\"\n ),\n )\n )\n\n # plot major axis\n maj_vect = self._calculate_major_axis_per_speed(speed, amplitude_units)\n\n fig.add_trace(\n go.Scatter3d(\n x=x_pos[:, 0],\n y=Q_(np.real(maj_vect[3]), base_unit).to(amplitude_units).m,\n z=Q_(np.imag(maj_vect[3]), base_unit).to(amplitude_units).m,\n mode=\"lines+markers\",\n marker=dict(color=\"black\"),\n line=dict(color=\"black\", dash=\"dashdot\"),\n name=\"Major Axis\",\n legendgroup=\"Major_Axis\",\n showlegend=True,\n hovertemplate=(\n f\"Position ({rotor_length_units}): %{{x:.2f}}<br>X - Amplitude ({amplitude_units}): %{{y:.2e}}<br>Y - Amplitude ({amplitude_units}): %{{z:.2e}}\"\n ),\n )\n )\n\n # plot center line\n line = np.zeros(len(nodes_pos))\n fig.add_trace(\n go.Scatter3d(\n x=nodes_pos,\n y=line,\n z=line,\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n # plot unbalance markers\n i = 0\n for n, m, p in zip(ub[0], ub[1], ub[2]):\n fig.add_trace(\n go.Scatter3d(\n x=[x_pos[int(n), 0], x_pos[int(n), 0]],\n y=Q_([0, np.amax(np.abs(maj_vect[4])) / 2 * np.cos(p)], base_unit)\n .to(amplitude_units)\n .m,\n z=Q_([0, np.amax(np.abs(maj_vect[4])) / 2 * np.sin(p)], base_unit)\n .to(amplitude_units)\n .m,\n mode=\"lines\",\n line=dict(color=\"firebrick\"),\n legendgroup=\"Unbalance\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n fig.add_trace(\n go.Scatter3d(\n x=[x_pos[int(n), 0]],\n y=Q_([np.amax(np.abs(maj_vect[4])) / 2 * np.cos(p)], base_unit)\n .to(amplitude_units)\n .m,\n z=Q_([np.amax(np.abs(maj_vect[4])) / 2 * np.sin(p)], base_unit)\n .to(amplitude_units)\n .m,\n mode=\"markers\",\n marker=dict(symbol=\"diamond\", color=\"firebrick\"),\n name=\"Unbalance\",\n legendgroup=\"Unbalance\",\n showlegend=True if i == 0 else 
False,\n hovertemplate=(\n \"Node: {}<br>\" + \"Magnitude: {:.2e}<br>\" + \"Phase: {:.2f}\"\n ).format(int(n), m, p),\n )\n )\n i += 1\n\n speed_str = Q_(speed, \"rad/s\").to(frequency_units).m\n fig.update_layout(\n scene=dict(\n xaxis=dict(title=dict(text=f\"Rotor Length ({rotor_length_units})\")),\n yaxis=dict(title=dict(text=f\"Amplitude - X ({amplitude_units})\")),\n zaxis=dict(title=dict(text=f\"Amplitude - Y ({amplitude_units})\")),\n ),\n title=dict(\n text=f\"Deflected Shape<br>Speed = {speed_str} {frequency_units}\"\n ),\n **kwargs,\n )\n\n return fig\n\n def plot_bending_moment(\n self,\n speed,\n frequency_units=\"rad/s\",\n moment_units=\"N*m\",\n rotor_length_units=\"m\",\n fig=None,\n **kwargs,\n ):\n \"\"\"Plot the bending moment diagram.\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. Must be an element from the speed_range argument\n passed to the class (rad/s).\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n moment_units : str, optional\n Moment units.\n Default is 'N*m'.\n rotor_length_units : str\n Rotor Length units.\n Default is m.\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the deflected shape\n plot layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if not any(np.isclose(self.speed_range, speed, atol=1e-6)):\n raise ValueError(\"No data available for this speed value.\")\n\n Mx, My = self._calculate_bending_moment(speed=speed)\n Mx = Q_(Mx, \"N*m\").to(moment_units).m\n My = Q_(My, \"N*m\").to(moment_units).m\n Mr = np.sqrt(Mx ** 2 + My ** 2)\n\n nodes_pos = Q_(self.rotor.nodes_pos, \"m\").to(rotor_length_units).m\n\n if fig is None:\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=Mx,\n mode=\"lines\",\n name=f\"Bending Moment (X dir.) ({moment_units})\",\n legendgroup=\"Mx\",\n showlegend=True,\n hovertemplate=f\"Nodal Position: %{{x:.2f}}<br>Mx ({moment_units}): %{{y:.2e}}\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=My,\n mode=\"lines\",\n name=f\"Bending Moment (Y dir.) 
({moment_units})\",\n legendgroup=\"My\",\n showlegend=True,\n hovertemplate=f\"Nodal Position: %{{x:.2f}}<br>My ({moment_units}): %{{y:.2e}}\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=Mr,\n mode=\"lines\",\n name=f\"Bending Moment (abs) ({moment_units})\",\n legendgroup=\"Mr\",\n showlegend=True,\n hovertemplate=f\"Nodal Position: %{{x:.2f}}<br>Mr ({moment_units}): %{{y:.2e}}\",\n )\n )\n\n # plot center line\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=np.zeros_like(nodes_pos),\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n fig.update_xaxes(title_text=f\"Rotor Length ({rotor_length_units})\")\n fig.update_yaxes(\n title_text=f\"Bending Moment ({moment_units})\",\n title_font=dict(size=12),\n )\n fig.update_layout(**kwargs)\n\n return fig\n\n def plot_deflected_shape(\n self,\n speed,\n samples=101,\n frequency_units=\"rad/s\",\n amplitude_units=\"m\",\n rotor_length_units=\"m\",\n moment_units=\"N*m\",\n shape2d_kwargs=None,\n shape3d_kwargs=None,\n bm_kwargs=None,\n subplot_kwargs=None,\n ):\n \"\"\"Plot deflected shape diagrams.\n\n This method returns a subplot with:\n - 3D view deflected shape;\n - 2D view deflected shape - Major Axis;\n - Bending Moment Diagram;\n\n Parameters\n ----------\n speed : float\n The rotor rotation speed. Must be an element from the speed_range argument\n passed to the class (rad/s).\n samples : int, optional\n Number of samples to generate the orbit for each node.\n Default is 101.\n frequency_units : str, optional\n Frequency units.\n Default is \"rad/s\"\n amplitude_units : str, optional\n Units for the response magnitude.\n Acceptable units dimensionality are:\n\n '[length]' - Displays the displacement;\n\n '[speed]' - Displays the velocity;\n\n '[acceleration]' - Displays the acceleration.\n\n Default is \"m/N\" 0 to peak.\n To use peak to peak use '<unit> pkpk' (e.g. 'm/N pkpk')\n rotor_length_units : str, optional\n Rotor length units.\n Default is 'm'.\n moment_units : str\n Moment units.\n Default is 'N*m'\n shape2d_kwargs : optional\n Additional key word arguments can be passed to change the 2D deflected shape\n plot layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n shape3d_kwargs : optional\n Additional key word arguments can be passed to change the 3D deflected shape\n plot layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n bm_kwargs : optional\n Additional key word arguments can be passed to change the bending moment\n diagram plot layout only (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n subplot_kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...). 
This kwargs override \"mag_kwargs\" and\n \"phase_kwargs\" dictionaries.\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency and\n polar Amplitude vs Phase plots.\n \"\"\"\n shape2d_kwargs = {} if shape2d_kwargs is None else copy.copy(shape2d_kwargs)\n shape3d_kwargs = {} if shape3d_kwargs is None else copy.copy(shape3d_kwargs)\n bm_kwargs = {} if bm_kwargs is None else copy.copy(bm_kwargs)\n subplot_kwargs = {} if subplot_kwargs is None else copy.copy(subplot_kwargs)\n speed_str = Q_(speed, \"rad/s\").to(frequency_units).m\n\n # fmt: off\n fig0 = self.plot_deflected_shape_2d(\n speed, frequency_units, amplitude_units, rotor_length_units, **shape2d_kwargs\n )\n fig1 = self.plot_deflected_shape_3d(\n speed, samples, frequency_units, amplitude_units, rotor_length_units, **shape3d_kwargs\n )\n fig2 = self.plot_bending_moment(\n speed, frequency_units, moment_units, rotor_length_units, **bm_kwargs\n )\n # fmt: on\n\n subplots = make_subplots(\n rows=2,\n cols=2,\n specs=[[{}, {\"type\": \"scene\", \"rowspan\": 2}], [{}, None]],\n shared_xaxes=True,\n vertical_spacing=0.02,\n )\n for data in fig0[\"data\"]:\n subplots.add_trace(data, row=1, col=1)\n for data in fig1[\"data\"]:\n subplots.add_trace(data, row=1, col=2)\n for data in fig2[\"data\"]:\n subplots.add_trace(data, row=2, col=1)\n\n subplots.update_yaxes(fig0.layout.yaxis, row=1, col=1)\n subplots.update_xaxes(fig2.layout.xaxis, row=2, col=1)\n subplots.update_yaxes(fig2.layout.yaxis, row=2, col=1)\n subplots.update_layout(\n scene=dict(\n bgcolor=fig1.layout.scene.bgcolor,\n xaxis=fig1.layout.scene.xaxis,\n yaxis=fig1.layout.scene.yaxis,\n zaxis=fig1.layout.scene.zaxis,\n domain=dict(x=[0.47, 1]),\n ),\n title=dict(\n text=f\"Deflected Shape<br>Speed = {speed_str} {frequency_units}\",\n ),\n legend=dict(\n orientation=\"h\",\n xanchor=\"center\",\n yanchor=\"bottom\",\n x=0.5,\n y=-0.3,\n ),\n **subplot_kwargs,\n )\n\n return subplots\n\n\nclass StaticResults(Results):\n \"\"\"Class used to store results and provide plots for Static Analysis.\n\n This class plots free-body diagram, deformed shaft, shearing\n force diagram and bending moment diagram.\n\n Parameters\n ----------\n deformation : array\n shaft displacement in y direction.\n Vx : array\n shearing force array.\n Bm : array\n bending moment array.\n w_shaft : dataframe\n shaft dataframe\n disk_forces : dict\n Indicates the force exerted by each disk.\n bearing_forces : dict\n Relates the static force at each node due to the bearing reaction forces.\n nodes : list\n list of nodes numbers.\n nodes_pos : list\n list of nodes positions.\n Vx_axis : array\n X axis for displaying shearing force and bending moment.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n Plotly figure with Static Analysis plots depending on which method\n is called.\n \"\"\"\n\n def __init__(\n self,\n deformation,\n Vx,\n Bm,\n w_shaft,\n disk_forces,\n bearing_forces,\n nodes,\n nodes_pos,\n Vx_axis,\n ):\n\n self.deformation = deformation\n self.Vx = Vx\n self.Bm = Bm\n self.w_shaft = w_shaft\n self.disk_forces = disk_forces\n self.bearing_forces = bearing_forces\n self.nodes = nodes\n self.nodes_pos = nodes_pos\n self.Vx_axis = Vx_axis\n\n def plot_deformation(\n self, deformation_units=\"m\", rotor_length_units=\"m\", fig=None, **kwargs\n ):\n \"\"\"Plot the shaft static deformation.\n\n This method plots:\n deformed shaft\n\n Parameters\n 
----------\n deformation_units : str\n Deformation units.\n Default is 'm'.\n rotor_length_units : str\n Rotor Length units.\n Default is 'm'.\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if fig is None:\n fig = go.Figure()\n\n shaft_end = max([sublist[-1] for sublist in self.nodes_pos])\n shaft_end = Q_(shaft_end, \"m\").to(rotor_length_units).m\n\n # fig - plot centerline\n fig.add_trace(\n go.Scatter(\n x=[-0.01 * shaft_end, 1.01 * shaft_end],\n y=[0, 0],\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n count = 0\n for deformation, Vx, Bm, nodes, nodes_pos, Vx_axis in zip(\n self.deformation, self.Vx, self.Bm, self.nodes, self.nodes_pos, self.Vx_axis\n ):\n\n fig.add_trace(\n go.Scatter(\n x=Q_(nodes_pos, \"m\").to(rotor_length_units).m,\n y=Q_(deformation, \"m\").to(deformation_units).m,\n mode=\"lines\",\n line_shape=\"spline\",\n line_smoothing=1.0,\n name=f\"Shaft {count}\",\n showlegend=True,\n hovertemplate=(\n f\"Rotor Length ({rotor_length_units}): %{{x:.2f}}<br>Displacement ({deformation_units}): %{{y:.2e}}\"\n ),\n )\n )\n count += 1\n\n fig.update_xaxes(title_text=f\"Rotor Length ({rotor_length_units})\")\n fig.update_yaxes(title_text=f\"Deformation ({deformation_units})\")\n fig.update_layout(title=dict(text=\"Static Deformation\"), **kwargs)\n\n return fig\n\n def plot_free_body_diagram(\n self, force_units=\"N\", rotor_length_units=\"m\", fig=None, **kwargs\n ):\n \"\"\"Plot the rotor free-body diagram.\n\n Parameters\n ----------\n force_units : str\n Force units.\n Default is 'N'.\n rotor_length_units : str\n Rotor Length units.\n Default is 'm'.\n subplots : Plotly graph_objects.make_subplots()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. 
plot_bgcolor=\"white\", ...).\n *See Plotly Python make_subplot Reference for more information.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n The figure object with the plot.\n \"\"\"\n cols = 1 if len(self.nodes_pos) < 2 else 2\n rows = len(self.nodes_pos) // 2 + len(self.nodes_pos) % 2\n if fig is None:\n fig = make_subplots(\n rows=rows,\n cols=cols,\n subplot_titles=[\n \"Free-Body Diagram - Shaft {}\".format(j)\n for j in range(len(self.nodes_pos))\n ],\n )\n j = 0\n y_start = 5.0\n for nodes_pos, nodes in zip(self.nodes_pos, self.nodes):\n col = j % 2 + 1\n row = j // 2 + 1\n\n fig.add_trace(\n go.Scatter(\n x=Q_(nodes_pos, \"m\").to(rotor_length_units).m,\n y=np.zeros(len(nodes_pos)),\n mode=\"lines\",\n line=dict(color=\"black\"),\n hoverinfo=\"none\",\n showlegend=False,\n ),\n row=row,\n col=col,\n )\n fig.add_trace(\n go.Scatter(\n x=Q_(nodes_pos, \"m\").to(rotor_length_units).m,\n y=[y_start] * len(nodes_pos),\n mode=\"lines\",\n line=dict(color=\"black\"),\n hoverinfo=\"none\",\n showlegend=False,\n ),\n row=row,\n col=col,\n )\n\n # fig - plot arrows indicating shaft weight distribution\n text = \"{:.1f}\".format(Q_(self.w_shaft[j], \"N\").to(force_units).m)\n ini = nodes_pos[0]\n fin = nodes_pos[-1]\n arrows_list = np.arange(ini, 1.01 * fin, (fin - ini) / 5.0)\n for node in arrows_list:\n fig.add_annotation(\n x=Q_(node, \"m\").to(rotor_length_units).m,\n y=0,\n axref=\"x{}\".format(j + 1),\n ayref=\"y{}\".format(j + 1),\n showarrow=True,\n arrowhead=2,\n arrowsize=1,\n arrowwidth=5,\n arrowcolor=\"DimGray\",\n ax=Q_(node, \"m\").to(rotor_length_units).m,\n ay=y_start * 1.08,\n row=row,\n col=col,\n )\n fig.add_annotation(\n x=Q_(nodes_pos[0], \"m\").to(rotor_length_units).m,\n y=y_start,\n xref=\"x{}\".format(j + 1),\n yref=\"y{}\".format(j + 1),\n xshift=125,\n yshift=20,\n text=f\"Shaft weight = {text}{force_units}\",\n align=\"right\",\n showarrow=False,\n )\n\n # plot bearing reaction forces\n for k, v in self.bearing_forces.items():\n _, node = k.split(\"_\")\n node = int(node)\n if node in nodes:\n text = f\"{Q_(v, 'N').to(force_units).m:.2f}\"\n var = 1 if v < 0 else -1\n fig.add_annotation(\n x=Q_(nodes_pos[nodes.index(node)], \"m\")\n .to(rotor_length_units)\n .m,\n y=0,\n axref=\"x{}\".format(j + 1),\n ayref=\"y{}\".format(j + 1),\n text=f\"Fb = {text}{force_units}\",\n textangle=90,\n showarrow=True,\n arrowhead=2,\n arrowsize=1,\n arrowwidth=5,\n arrowcolor=\"DarkSalmon\",\n ax=Q_(nodes_pos[nodes.index(node)], \"m\")\n .to(rotor_length_units)\n .m,\n ay=var * 2.5 * y_start,\n row=row,\n col=col,\n )\n\n # plot disk forces\n for k, v in self.disk_forces.items():\n _, node = k.split(\"_\")\n node = int(node)\n if node in nodes:\n text = f\"{-Q_(v, 'N').to(force_units).m:.2f}\"\n fig.add_annotation(\n x=Q_(nodes_pos[nodes.index(node)], \"m\")\n .to(rotor_length_units)\n .m,\n y=0,\n axref=\"x{}\".format(j + 1),\n ayref=\"y{}\".format(j + 1),\n text=f\"Fd = {text}{force_units}\",\n textangle=270,\n showarrow=True,\n arrowhead=2,\n arrowsize=1,\n arrowwidth=5,\n arrowcolor=\"FireBrick\",\n ax=Q_(nodes_pos[nodes.index(node)], \"m\")\n .to(rotor_length_units)\n .m,\n ay=2.5 * y_start,\n row=row,\n col=col,\n )\n\n fig.update_xaxes(\n title_text=f\"Rotor Length ({rotor_length_units})\", row=row, col=col\n )\n fig.update_yaxes(\n visible=False, gridcolor=\"lightgray\", showline=False, row=row, col=col\n )\n j += 1\n\n fig.update_layout(**kwargs)\n\n return fig\n\n def plot_shearing_force(\n self, force_units=\"N\", rotor_length_units=\"m\", 
fig=None, **kwargs\n ):\n \"\"\"Plot the rotor shearing force diagram.\n\n This method plots:\n shearing force diagram.\n\n Parameters\n ----------\n force_units : str\n Force units.\n Default is 'N'.\n rotor_length_units : str\n Rotor Length units.\n Default is 'm'.\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n kwargs : optional\n Additional key word arguments can be passed to change the plot layout only\n (e.g. width=1000, height=800, ...).\n *See Plotly Python Figure Reference for more information.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n \"\"\"\n if fig is None:\n fig = go.Figure()\n\n shaft_end = (\n Q_(max([sublist[-1] for sublist in self.nodes_pos]), \"m\")\n .to(rotor_length_units)\n .m\n )\n\n # fig - plot centerline\n fig.add_trace(\n go.Scatter(\n x=[-0.1 * shaft_end, 1.1 * shaft_end],\n y=[0, 0],\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n j = 0\n for Vx, Vx_axis in zip(self.Vx, self.Vx_axis):\n fig.add_trace(\n go.Scatter(\n x=Q_(Vx_axis, \"m\").to(rotor_length_units).m,\n y=Q_(Vx, \"N\").to(force_units).m,\n mode=\"lines\",\n name=f\"Shaft {j}\",\n legendgroup=f\"Shaft {j}\",\n showlegend=True,\n hovertemplate=(\n f\"Rotor Length ({rotor_length_units}): %{{x:.2f}}<br>Shearing Force ({force_units}): %{{y:.2f}}\"\n ),\n )\n )\n j += 1\n\n fig.update_xaxes(\n title_text=f\"Rotor Length ({rotor_length_units})\",\n range=[-0.1 * shaft_end, 1.1 * shaft_end],\n )\n fig.update_yaxes(title_text=f\"Force ({force_units})\")\n fig.update_layout(title=dict(text=\"Shearing Force Diagram\"), **kwargs)\n\n return fig\n\n def plot_bending_moment(\n self, moment_units=\"N*m\", rotor_length_units=\"m\", fig=None, **kwargs\n ):\n \"\"\"Plot the rotor bending moment diagram.\n\n This method plots:\n bending moment diagram.\n\n Parameters\n ----------\n moment_units : str, optional\n Moment units.\n Default is 'N*m'.\n rotor_length_units : str\n Rotor Length units.\n Default is 'm'.\n fig : Plotly graph_objects.Figure()\n Plotly figure with the bending moment diagram plot\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n Plotly figure with the bending moment diagram plot\n \"\"\"\n if fig is None:\n fig = go.Figure()\n\n shaft_end = (\n Q_(max([sublist[-1] for sublist in self.nodes_pos]), \"m\")\n .to(rotor_length_units)\n .m\n )\n\n # fig - plot centerline\n fig.add_trace(\n go.Scatter(\n x=[-0.1 * shaft_end, 1.1 * shaft_end],\n y=[0, 0],\n mode=\"lines\",\n line=dict(color=\"black\", dash=\"dashdot\"),\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n\n j = 0\n for Bm, nodes_pos in zip(self.Bm, self.Vx_axis):\n fig.add_trace(\n go.Scatter(\n x=Q_(nodes_pos, \"m\").to(rotor_length_units).m,\n y=Q_(Bm, \"N*m\").to(moment_units).m,\n mode=\"lines\",\n line_shape=\"spline\",\n line_smoothing=1.0,\n name=f\"Shaft {j}\",\n legendgroup=f\"Shaft {j}\",\n showlegend=True,\n hovertemplate=(\n f\"Rotor Length ({rotor_length_units}): %{{x:.2f}}<br>Bending Moment ({moment_units}): %{{y:.2f}}\"\n ),\n )\n )\n j += 1\n\n fig.update_xaxes(title_text=f\"Rotor Length ({rotor_length_units})\")\n fig.update_yaxes(title_text=f\"Bending Moment ({moment_units})\")\n fig.update_layout(title=dict(text=\"Bending Moment Diagram\"), **kwargs)\n\n return fig\n\n\nclass SummaryResults(Results):\n \"\"\"Class used to store results and provide plots rotor summary.\n\n This class aims to present a summary of the main parameters and attributes\n from a 
rotor model. The data is presented in a table format.\n\n    Parameters\n    ----------\n    df_shaft: dataframe\n        shaft dataframe\n    df_disks: dataframe\n        disks dataframe\n    df_bearings: dataframe\n        bearings dataframe\n    brg_forces: list\n        list of reaction forces on bearings\n    nodes_pos: list\n        list of nodes axial position\n    CG: float\n        rotor center of gravity\n    Ip: float\n        rotor total moment of inertia around the center line\n    tag: str\n        rotor's tag\n\n    Returns\n    -------\n    fig : Plotly graph_objects.make_subplots()\n        The figure object with the tables plot.\n    \"\"\"\n\n    def __init__(\n        self, df_shaft, df_disks, df_bearings, nodes_pos, brg_forces, CG, Ip, tag\n    ):\n        self.df_shaft = df_shaft\n        self.df_disks = df_disks\n        self.df_bearings = df_bearings\n        self.brg_forces = brg_forces\n        self.nodes_pos = np.array(nodes_pos)\n        self.CG = CG\n        self.Ip = Ip\n        self.tag = tag\n\n    def plot(self):\n        \"\"\"Plot the summary table.\n\n        This method plots:\n            Table with summary of rotor parameters and attributes\n\n        Returns\n        -------\n        fig : Plotly graph_objects.make_subplots()\n            The figure object with the tables plot.\n        \"\"\"\n        materials = [mat.name for mat in self.df_shaft[\"material\"]]\n\n        shaft_data = {\n            \"Shaft number\": self.df_shaft[\"shaft_number\"],\n            \"Left station\": self.df_shaft[\"n_l\"],\n            \"Right station\": self.df_shaft[\"n_r\"],\n            \"Elem number\": self.df_shaft[\"_n\"],\n            \"Beam left loc\": self.df_shaft[\"nodes_pos_l\"],\n            \"Length\": self.df_shaft[\"L\"],\n            \"Axial CG Pos\": self.df_shaft[\"axial_cg_pos\"],\n            \"Beam right loc\": self.df_shaft[\"nodes_pos_r\"],\n            \"Material\": materials,\n            \"Mass\": self.df_shaft[\"m\"].map(\"{:.3f}\".format),\n            \"Inertia\": self.df_shaft[\"Im\"].map(\"{:.2e}\".format),\n        }\n\n        rotor_data = {\n            \"Tag\": [self.tag],\n            \"Starting node\": [self.df_shaft[\"n_l\"].iloc[0]],\n            \"Ending node\": [self.df_shaft[\"n_r\"].iloc[-1]],\n            \"Starting point\": [self.df_shaft[\"nodes_pos_l\"].iloc[0]],\n            \"Total length\": [self.df_shaft[\"nodes_pos_r\"].iloc[-1]],\n            \"CG\": [\"{:.3f}\".format(self.CG)],\n            \"Ip\": [\"{:.3e}\".format(self.Ip)],\n            \"Rotor Mass\": [\n                \"{:.3f}\".format(np.sum(self.df_shaft[\"m\"]) + np.sum(self.df_disks[\"m\"]))\n            ],\n        }\n\n        disk_data = {\n            \"Tag\": self.df_disks[\"tag\"],\n            \"Shaft number\": self.df_disks[\"shaft_number\"],\n            \"Node\": self.df_disks[\"n\"],\n            \"Nodal Position\": self.nodes_pos[self.df_disks[\"n\"]],\n            \"Mass\": self.df_disks[\"m\"].map(\"{:.3f}\".format),\n            \"Ip\": self.df_disks[\"Ip\"].map(\"{:.3e}\".format),\n        }\n\n        bearing_data = {\n            \"Tag\": self.df_bearings[\"tag\"],\n            \"Shaft number\": self.df_bearings[\"shaft_number\"],\n            \"Node\": self.df_bearings[\"n\"],\n            \"N_link\": self.df_bearings[\"n_link\"],\n            \"Nodal Position\": self.nodes_pos[self.df_bearings[\"n\"]],\n            \"Bearing force\": list(self.brg_forces.values()),\n        }\n\n        fig = make_subplots(\n            rows=2,\n            cols=2,\n            specs=[\n                [{\"type\": \"table\"}, {\"type\": \"table\"}],\n                [{\"type\": \"table\"}, {\"type\": \"table\"}],\n            ],\n            subplot_titles=[\n                \"Rotor data\",\n                \"Shaft Element data\",\n                \"Disk Element data\",\n                \"Bearing Element data\",\n            ],\n        )\n        colors = [\"#ffffff\", \"#c4d9ed\"]\n        fig.add_trace(\n            go.Table(\n                header=dict(\n                    values=[\"{}\".format(k) for k in rotor_data.keys()],\n                    font=dict(size=12, color=\"white\"),\n                    line=dict(color=\"#1f4060\", width=1.5),\n                    fill=dict(color=\"#1f4060\"),\n                    align=\"center\",\n                ),\n                cells=dict(\n                    values=list(rotor_data.values()),\n                    font=dict(size=12),\n                    line=dict(color=\"#1f4060\"),\n                    fill=dict(color=\"white\"),\n                    align=\"center\",\n                    
height=25,\n                ),\n            ),\n            row=1,\n            col=1,\n        )\n\n        cell_colors = [colors[i % 2] for i in range(len(materials))]\n        fig.add_trace(\n            go.Table(\n                header=dict(\n                    values=[\"{}\".format(k) for k in shaft_data.keys()],\n                    font=dict(family=\"Verdana\", size=12, color=\"white\"),\n                    line=dict(color=\"#1e4162\", width=1.5),\n                    fill=dict(color=\"#1e4162\"),\n                    align=\"center\",\n                ),\n                cells=dict(\n                    values=list(shaft_data.values()),\n                    font=dict(family=\"Verdana\", size=12, color=\"#12263b\"),\n                    line=dict(color=\"#c4d9ed\", width=1.5),\n                    fill=dict(color=[cell_colors * len(shaft_data)]),\n                    align=\"center\",\n                    height=25,\n                ),\n            ),\n            row=1,\n            col=2,\n        )\n\n        cell_colors = [colors[i % 2] for i in range(len(self.df_disks[\"tag\"]))]\n        fig.add_trace(\n            go.Table(\n                header=dict(\n                    values=[\"{}\".format(k) for k in disk_data.keys()],\n                    font=dict(family=\"Verdana\", size=12, color=\"white\"),\n                    line=dict(color=\"#1e4162\", width=1.5),\n                    fill=dict(color=\"#1e4162\"),\n                    align=\"center\",\n                ),\n                cells=dict(\n                    values=list(disk_data.values()),\n                    font=dict(family=\"Verdana\", size=12, color=\"#12263b\"),\n                    line=dict(color=\"#c4d9ed\", width=1.5),\n                    fill=dict(color=[cell_colors * len(disk_data)]),\n                    align=\"center\",\n                    height=25,\n                ),\n            ),\n            row=2,\n            col=1,\n        )\n\n        cell_colors = [colors[i % 2] for i in range(len(self.df_bearings[\"tag\"]))]\n        fig.add_trace(\n            go.Table(\n                header=dict(\n                    values=[\"{}\".format(k) for k in bearing_data.keys()],\n                    font=dict(family=\"Verdana\", size=12, color=\"white\"),\n                    line=dict(color=\"#1e4162\", width=1.5),\n                    fill=dict(color=\"#1e4162\"),\n                    align=\"center\",\n                ),\n                cells=dict(\n                    values=list(bearing_data.values()),\n                    font=dict(family=\"Verdana\", size=12, color=\"#12263b\"),\n                    line=dict(color=\"#c4d9ed\", width=1.5),\n                    fill=dict(color=[cell_colors * len(bearing_data)]),\n                    align=\"center\",\n                    height=25,\n                ),\n            ),\n            row=2,\n            col=2,\n        )\n        return fig\n\n\nclass ConvergenceResults(Results):\n    \"\"\"Class used to store results and provide plots for Convergence Analysis.\n\n    This class plots:\n        Natural Frequency vs Number of Elements\n        Relative Error vs Number of Elements\n\n    Parameters\n    ----------\n    el_num : array\n        Array with number of elements in each iteration\n    eigv_arr : array\n        Array with the n'th natural frequency in each iteration\n    error_arr : array\n        Array with the relative error in each iteration\n\n    Returns\n    -------\n    fig : Plotly graph_objects.make_subplots()\n        The figure object with the plot.\n    \"\"\"\n\n    def __init__(self, el_num, eigv_arr, error_arr):\n        self.el_num = el_num\n        self.eigv_arr = eigv_arr\n        self.error_arr = error_arr\n\n    def plot(self, fig=None, **kwargs):\n        \"\"\"Plot convergence results.\n\n        This method plots:\n            Natural Frequency vs Number of Elements\n            Relative Error vs Number of Elements\n\n        Parameters\n        ----------\n        fig : Plotly graph_objects.make_subplots()\n            The figure object with the plot.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. 
width=1000, height=800, ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.make_subplots()\n            The figure object with the plot.\n        \"\"\"\n        if fig is None:\n            fig = make_subplots(\n                rows=1,\n                cols=2,\n                subplot_titles=[\"Frequency Evaluation\", \"Relative Error Evaluation\"],\n            )\n\n        # plot Frequency vs number of elements\n        fig.add_trace(\n            go.Scatter(\n                x=self.el_num,\n                y=self.eigv_arr,\n                mode=\"lines+markers\",\n                hovertemplate=(\n                    \"Number of Elements: %{x:.2f}<br>\" + \"Frequency: %{y:.0f}\"\n                ),\n                showlegend=False,\n            ),\n            row=1,\n            col=1,\n        )\n        fig.update_xaxes(title_text=\"Number of Elements\", row=1, col=1)\n        fig.update_yaxes(title_text=\"Frequency\", row=1, col=1)\n\n        # plot Error vs number of elements\n        fig.add_trace(\n            go.Scatter(\n                x=self.el_num,\n                y=self.error_arr,\n                mode=\"lines+markers\",\n                hovertemplate=(\n                    \"Number of Elements: %{x:.2f}<br>\" + \"Relative Error: %{y:.0f}\"\n                ),\n                showlegend=False,\n            ),\n            row=1,\n            col=2,\n        )\n\n        fig.update_xaxes(title_text=\"Number of Elements\", row=1, col=2)\n        fig.update_yaxes(title_text=\"Relative Error (%)\", row=1, col=2)\n\n        fig.update_layout(**kwargs)\n\n        return fig\n\n\nclass TimeResponseResults(Results):\n    \"\"\"Class used to store results and provide plots for Time Response Analysis.\n\n    This class takes the results from time response analysis and creates\n    plots given a force and a time. It's possible to plot the time response for\n    a single DoF, the orbit response for a single node, or the orbit responses for all\n    nodes.\n    The plot type options are:\n        - 1d: plot time response for given probes.\n        - 2d: plot orbit of a selected node of a rotor system.\n        - 3d: plot orbits for each node on the rotor system in a 3D view.\n\n    plot_1d: input probes.\n    plot_2d: input a node.\n    plot_3d: no need to input probes or node.\n\n    Parameters\n    ----------\n    rotor : Rotor.object\n        The Rotor object\n    t : array\n        Time values for the output.\n    yout : array\n        System response.\n    xout : array\n        Time evolution of the state vector.\n\n    Returns\n    -------\n    fig : Plotly graph_objects.Figure()\n        The figure object with the plot.\n    \"\"\"\n\n    def __init__(self, rotor, t, yout, xout):\n        self.t = t\n        self.yout = yout\n        self.xout = xout\n        self.rotor = rotor\n\n    def plot_1d(\n        self,\n        probe,\n        probe_units=\"rad\",\n        displacement_units=\"m\",\n        time_units=\"s\",\n        fig=None,\n        **kwargs,\n    ):\n        \"\"\"Plot time response.\n\n        This method plots the time response given a tuple of probes with their nodes\n        and orientations.\n\n        Parameters\n        ----------\n        probe : list\n            List with tuples (node, orientation angle, tag).\n\n            node : int -> Indicate the node where the probe is located.\n\n            orientation : float -> Probe orientation angle about the shaft.\n            The 0 refers to +X direction.\n            The strings 'major' and 'minor' can also be used to reference the major\n            and minor axis.\n\n            tag : str, optional -> Probe tag to be added as a DataFrame column title.\n        probe_units : str, optional\n            Units for probe orientation.\n            Default is \"rad\".\n        displacement_units : str, optional\n            Displacement units.\n            Default is 'm'.\n        time_units : str\n            Time units.\n            Default is 's'.\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. 
width=1000, height=800, ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        \"\"\"\n        nodes = self.rotor.nodes\n        link_nodes = self.rotor.link_nodes\n        ndof = self.rotor.number_dof\n\n        if fig is None:\n            fig = go.Figure()\n\n        for i, p in enumerate(probe):\n            fix_dof = (p[0] - nodes[-1] - 1) * ndof // 2 if p[0] in link_nodes else 0\n            dofx = ndof * p[0] - fix_dof\n            dofy = ndof * p[0] + 1 - fix_dof\n\n            angle = Q_(p[1], probe_units).to(\"rad\").m\n\n            # fmt: off\n            operator = np.array(\n                [[np.cos(angle), - np.sin(angle)],\n                 [np.cos(angle), + np.sin(angle)]]\n            )\n\n            _probe_resp = operator @ np.vstack((self.yout[:, dofx], self.yout[:, dofy]))\n            probe_resp = (\n                _probe_resp[0] * np.cos(angle) ** 2  +\n                _probe_resp[1] * np.sin(angle) ** 2\n            )\n            # fmt: on\n\n            probe_resp = Q_(probe_resp, \"m\").to(displacement_units).m\n\n            try:\n                probe_tag = p[2]\n            except IndexError:\n                probe_tag = f\"Probe {i+1} - Node {p[0]}\"\n\n            fig.add_trace(\n                go.Scatter(\n                    x=Q_(self.t, \"s\").to(time_units).m,\n                    y=probe_resp,\n                    mode=\"lines\",\n                    name=probe_tag,\n                    legendgroup=probe_tag,\n                    showlegend=True,\n                    hovertemplate=f\"Time ({time_units}): %{{x:.2f}}<br>Amplitude ({displacement_units}): %{{y:.2e}}\",\n                )\n            )\n\n        fig.update_xaxes(title_text=f\"Time ({time_units})\")\n        fig.update_yaxes(title_text=f\"Amplitude ({displacement_units})\")\n        fig.update_layout(**kwargs)\n\n        return fig\n\n    def plot_2d(self, node, displacement_units=\"m\", fig=None, **kwargs):\n        \"\"\"Plot orbit response (2D).\n\n        This function will take a rotor object and plot its orbit response using Plotly.\n\n        Parameters\n        ----------\n        node : int\n            Selected node to plot orbit.\n        displacement_units : str, optional\n            Displacement units.\n            Default is 'm'.\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. 
width=1000, height=800, ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        \"\"\"\n        nodes = self.rotor.nodes\n        link_nodes = self.rotor.link_nodes\n        ndof = self.rotor.number_dof\n\n        fix_dof = (node - nodes[-1] - 1) * ndof // 2 if node in link_nodes else 0\n        dofx = ndof * node - fix_dof\n        dofy = ndof * node + 1 - fix_dof\n\n        if fig is None:\n            fig = go.Figure()\n\n        fig.add_trace(\n            go.Scatter(\n                x=Q_(self.yout[:, dofx], \"m\").to(displacement_units).m,\n                y=Q_(self.yout[:, dofy], \"m\").to(displacement_units).m,\n                mode=\"lines\",\n                name=\"Orbit\",\n                legendgroup=\"Orbit\",\n                showlegend=False,\n                hovertemplate=(\n                    f\"X - Amplitude ({displacement_units}): %{{x:.2e}}<br>Y - Amplitude ({displacement_units}): %{{y:.2e}}\"\n                ),\n            )\n        )\n\n        fig.update_xaxes(title_text=f\"Amplitude ({displacement_units}) - X direction\")\n        fig.update_yaxes(title_text=f\"Amplitude ({displacement_units}) - Y direction\")\n        fig.update_layout(\n            title=dict(text=\"Response for node {}\".format(node)), **kwargs\n        )\n\n        return fig\n\n    def plot_3d(\n        self, displacement_units=\"m\", rotor_length_units=\"m\", fig=None, **kwargs\n    ):\n        \"\"\"Plot orbit response (3D).\n\n        This function will take a rotor object and plot its orbit response using Plotly.\n\n        Parameters\n        ----------\n        displacement_units : str\n            Displacement units.\n            Default is 'm'.\n        rotor_length_units : str\n            Rotor Length units.\n            Default is 'm'.\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. hoverlabel_align=\"center\", ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        \"\"\"\n        nodes_pos = self.rotor.nodes_pos\n        nodes = self.rotor.nodes\n        ndof = self.rotor.number_dof\n\n        if fig is None:\n            fig = go.Figure()\n\n        for n in nodes:\n            x_pos = np.ones(self.yout.shape[0]) * nodes_pos[n]\n            fig.add_trace(\n                go.Scatter3d(\n                    x=Q_(x_pos, \"m\").to(rotor_length_units).m,\n                    y=Q_(self.yout[:, ndof * n], \"m\").to(displacement_units).m,\n                    z=Q_(self.yout[:, ndof * n + 1], \"m\").to(displacement_units).m,\n                    mode=\"lines\",\n                    line=dict(color=tableau_colors[\"blue\"]),\n                    name=\"Mean\",\n                    legendgroup=\"mean\",\n                    showlegend=False,\n                    hovertemplate=(\n                        f\"Nodal Position ({rotor_length_units}): %{{x:.2f}}<br>X - Amplitude ({displacement_units}): %{{y:.2e}}<br>Y - Amplitude ({displacement_units}): %{{z:.2e}}\"\n                    ),\n                    **kwargs,\n                )\n            )\n\n        # plot center line\n        line = np.zeros(len(nodes_pos))\n\n        fig.add_trace(\n            go.Scatter3d(\n                x=Q_(nodes_pos, \"m\").to(rotor_length_units).m,\n                y=line,\n                z=line,\n                mode=\"lines\",\n                line=dict(color=\"black\", dash=\"dashdot\"),\n                showlegend=False,\n            )\n        )\n\n        fig.update_layout(\n            scene=dict(\n                xaxis=dict(title=dict(text=f\"Rotor Length ({rotor_length_units})\")),\n                yaxis=dict(title=dict(text=f\"Amplitude - X ({displacement_units})\")),\n                zaxis=dict(title=dict(text=f\"Amplitude - Y ({displacement_units})\")),\n            ),\n            **kwargs,\n        )\n\n        return fig\n\n\nclass UCSResults(Results):\n    \"\"\"Class used to store results and provide plots for UCS Analysis.\n\n    Parameters\n    ----------\n    stiffness_range : tuple, optional\n        Tuple with (start, end) for stiffness range.\n    stiffness_log : tuple, optional\n        Numbers spaced evenly on a log scale to create a better visualization\n        (see np.logspace).\n    wn : array\n        Undamped 
natural frequencies array.\n    bearing : ross.BearingElement\n        Bearing used in the calculation.\n    intersection_points : array\n        Points where there is an intersection between undamped natural frequency and\n        the bearing stiffness.\n    \"\"\"\n\n    def __init__(\n        self, stiffness_range, stiffness_log, wn, bearing, intersection_points\n    ):\n        self.stiffness_range = stiffness_range\n        self.stiffness_log = stiffness_log\n        self.wn = wn\n        self.bearing = bearing\n        self.intersection_points = intersection_points\n\n    def plot(\n        self,\n        fig=None,\n        stiffness_units=\"N/m\",\n        frequency_units=\"rad/s\",\n        **kwargs,\n    ):\n        \"\"\"Plot undamped critical speed map.\n\n        This method will plot the undamped critical speed map for a given range\n        of stiffness values. If the range is not provided, the bearing\n        stiffness at rated speed will be used to create a range.\n\n        Parameters\n        ----------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        stiffness_units : str, optional\n            Units for the x axis.\n            Default is N/m.\n        frequency_units : str, optional\n            Units for the y axis.\n            Default is rad/s.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. width=1000, height=800, ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        \"\"\"\n\n        stiffness_log = self.stiffness_log\n        rotor_wn = self.wn\n        bearing0 = self.bearing\n        intersection_points = self.intersection_points\n\n        if fig is None:\n            fig = go.Figure()\n\n        # convert to desired units\n        stiffness_log = Q_(stiffness_log, \"N/m\").to(stiffness_units).m\n        rotor_wn = Q_(rotor_wn, \"rad/s\").to(frequency_units).m\n        intersection_points[\"x\"] = (\n            Q_(intersection_points[\"x\"], \"N/m\").to(stiffness_units).m\n        )\n        intersection_points[\"y\"] = (\n            Q_(intersection_points[\"y\"], \"rad/s\").to(frequency_units).m\n        )\n        bearing_kxx_stiffness = (\n            Q_(bearing0.kxx.interpolated(bearing0.frequency), \"N/m\")\n            .to(stiffness_units)\n            .m\n        )\n        bearing_kyy_stiffness = (\n            Q_(bearing0.kyy.interpolated(bearing0.frequency), \"N/m\")\n            .to(stiffness_units)\n            .m\n        )\n        bearing_frequency = Q_(bearing0.frequency, \"rad/s\").to(frequency_units).m\n\n        for j in range(rotor_wn.shape[0]):\n            fig.add_trace(\n                go.Scatter(\n                    x=stiffness_log,\n                    y=rotor_wn[j],\n                    mode=\"lines\",\n                    hoverinfo=\"none\",\n                    showlegend=False,\n                )\n            )\n\n        fig.add_trace(\n            go.Scatter(\n                x=intersection_points[\"x\"],\n                y=intersection_points[\"y\"],\n                mode=\"markers\",\n                marker=dict(symbol=\"circle-open-dot\", color=\"red\", size=8),\n                hovertemplate=f\"Stiffness ({stiffness_units}): %{{x:.2e}}<br>Frequency ({frequency_units}): %{{y:.2f}}\",\n                showlegend=False,\n                name=\"\",\n            )\n        )\n\n        fig.add_trace(\n            go.Scatter(\n                x=bearing_kxx_stiffness,\n                y=bearing_frequency,\n                mode=\"lines\",\n                line=dict(dash=\"dashdot\"),\n                hoverinfo=\"none\",\n                name=\"Kxx\",\n            )\n        )\n        fig.add_trace(\n            go.Scatter(\n                x=bearing_kyy_stiffness,\n                y=bearing_frequency,\n                mode=\"lines\",\n                line=dict(dash=\"dashdot\"),\n                hoverinfo=\"none\",\n                name=\"Kyy\",\n            )\n        )\n\n        fig.update_xaxes(\n            title_text=f\"Bearing Stiffness ({stiffness_units})\",\n            type=\"log\",\n            exponentformat=\"power\",\n        )\n        fig.update_yaxes(\n            title_text=f\"Critical Speed ({frequency_units})\",\n            type=\"log\",\n            exponentformat=\"power\",\n        )\n        fig.update_layout(title=dict(text=\"Undamped Critical Speed Map\"), **kwargs)\n\n        return fig\n\n\nclass Level1Results(Results):\n    \"\"\"Class used to store results and provide plots for 
Level 1 Stability Analysis.\n\n    Parameters\n    ----------\n    stiffness_range : array\n        Stiffness array used in the calculation.\n    log_dec : array\n        Calculated log dec array for each cross coupling.\n    \"\"\"\n\n    def __init__(self, stiffness_range, log_dec):\n        self.stiffness_range = stiffness_range\n        self.log_dec = log_dec\n\n    def plot(self, fig=None, **kwargs):\n        \"\"\"Plot level 1 stability analysis.\n\n        This method will plot the level 1 stability analysis for a\n        given stiffness range.\n\n        Parameters\n        ----------\n        fig : Plotly graph_objects.Figure\n            The figure object with the plot.\n        kwargs : optional\n            Additional key word arguments can be passed to change the plot layout only\n            (e.g. width=1000, height=800, ...).\n            *See Plotly Python Figure Reference for more information.\n\n        Returns\n        -------\n        fig : Plotly graph_objects.Figure()\n            The figure object with the plot.\n        \"\"\"\n        if fig is None:\n            fig = go.Figure()\n\n        stiffness = self.stiffness_range\n        log_dec = self.log_dec\n\n        fig.add_trace(\n            go.Scatter(\n                x=stiffness,\n                y=log_dec,\n                mode=\"lines\",\n                line=dict(width=3),\n                showlegend=False,\n                hovertemplate=(\"Stiffness: %{x:.2e}<br>\" + \"Log Dec: %{y:.2f}\"),\n            )\n        )\n\n        fig.update_xaxes(\n            title_text=\"Applied Cross Coupled Stiffness\", exponentformat=\"power\"\n        )\n        fig.update_yaxes(title_text=\"Log Dec\", exponentformat=\"power\")\n        fig.update_layout(title=dict(text=\"Level 1 stability analysis\"), **kwargs)\n\n        return fig\n" ]
[ [ "numpy.imag", "numpy.sqrt", "numpy.linspace", "numpy.arctan", "pandas.DataFrame", "numpy.dtype", "numpy.max", "numpy.zeros_like", "numpy.exp", "numpy.hstack", "numpy.ones_like", "numpy.arange", "numpy.sin", "numpy.real", "numpy.repeat", "numpy.isclose", "numpy.min", "numpy.delete", "numpy.array", "numpy.sum", "scipy.linalg.eig", "numpy.absolute", "numpy.abs", "numpy.conj", "numpy.cos", "numpy.ones", "numpy.angle", "numpy.vstack" ] ]
EdwardYGLi/snake_RL
[ "210f552faca380c054fb5310b6c61ea2b1dfadcc" ]
[ "snake.py" ]
[ "\"\"\"\nCreated by Edward Li at 10/6/20\nfollowed game code from https://github.com/maurock/snake-ga\n\"\"\"\n\nimport argparse\nimport random\nimport sys\nfrom collections import deque\nimport itertools\n\nimport numpy as np\nimport pygame\n\n\ndef update_screen():\n pygame.display.update()\n\n\ndef display(player, food, game, record):\n game.game_display.fill((255, 255, 255))\n display_ui(game, game.score, record)\n player.display_player(game)\n food.display_food(game)\n image = pygame.surfarray.array3d(pygame.display.get_surface()).swapaxes(0, 1)\n return image\n\n\ndef display_ui(game, score, record):\n myfont = pygame.font.SysFont('Segoe UI', 20)\n myfont_bold = pygame.font.SysFont('Segoe UI', 20, True)\n text_score = myfont.render('SCORE: ', True, (0, 0, 0))\n text_score_number = myfont.render(str(score), True, (0, 0, 0))\n text_highest = myfont.render('HIGHEST SCORE: ', True, (0, 0, 0))\n text_highest_number = myfont_bold.render(str(record), True, (0, 0, 0))\n game.game_display.blit(text_score, (45, game.height + 20))\n game.game_display.blit(text_score_number, (120, game.height + 20))\n game.game_display.blit(text_highest, (190, game.height + 20))\n game.game_display.blit(text_highest_number, (350, game.height + 20))\n new_surf = pygame.pixelcopy.make_surface(game.bg)\n game.game_display.blit(new_surf, (0, 0))\n\n\ndef get_record(score, record):\n if score >= record:\n return score\n else:\n return record\n\n\nclass Snake:\n num_actions = 3\n fcn_state_size = 12\n\n def __init__(self, width, height, block_size=20, state_scale=1):\n pygame.display.set_caption(\"Snake_RL\")\n self.width = width\n self.height = height\n self.state_w = width // block_size * state_scale\n self.state_h = height // block_size * state_scale\n self.state_scale = state_scale\n self.bg = np.ones((width, height, 3), dtype=np.uint8) * 255\n self.bg[:block_size, :, :] = 0\n self.bg[:, :block_size, :] = 0\n self.bg[-block_size:, :, :] = 0\n self.bg[:, -block_size:, :] = 0\n self.bg = self.bg.swapaxes(0, 1)\n self.diagonal = np.sqrt((self.state_w - 1) ** 2 + (self.state_h - 1) ** 2)\n\n self.game_display = pygame.display.set_mode((width, height + 40))\n self.game_buffer = None\n self.score = 0\n self.crash = False\n self.block_size = block_size\n self.player = Player(self)\n self.food = Food(self)\n self.actions = {\n 0: np.array([1, 0, 0]),\n 1: np.array([0, 1, 0]),\n 2: np.array([0, 0, 1])\n }\n\n def get_state_cnn(self):\n state = np.ones((self.state_h, self.state_w, 3), dtype=np.uint8)*255\n # draw borders\n state[:self.state_scale, :, :] = 0\n state[:, :self.state_scale, :] = 0\n state[-self.state_scale:, :, :] = 0\n state[:, -self.state_scale:, :] = 0\n\n state = self.player.update_state_cnn(state, self)\n state = self.food.update_state_cnn(state, self)\n return state\n\n def get_state_fcn(self):\n # this state model is from https://github.com/henniedeharder/snake\n\n # wall check\n if self.player.y >= self.height*0.75:\n wall_up, wall_down = 1, 0\n elif self.player.y <= self.height*0.25:\n wall_up, wall_down = 0, 1\n else:\n wall_up, wall_down = 0, 0\n if self.player.x>= self.width*0.75:\n wall_right, wall_left = 1, 0\n elif self.player.x <= self.width*0.25:\n wall_right, wall_left = 0, 1\n else:\n wall_right, wall_left = 0, 0\n\n def distance(body,head):\n return (body[0]-head[0])**2 + (body[1]-head[1])**2\n\n # body close\n body_up = []\n body_right = []\n body_down = []\n body_left = []\n if len(self.player.position) > 3:\n for body in itertools.islice(self.player.position,3,None):\n if 
distance(body,[self.player.x,self.player.y]) < 2 * self.block_size:\n if body[1] < self.player.y:\n body_down.append(1)\n elif body[1] > self.player.y:\n body_up.append(1)\n if body[0] < self.player.x:\n body_left.append(1)\n elif body[0] > self.player.x:\n body_right.append(1)\n\n if len(body_up) > 0:\n body_up = 1\n else:\n body_up = 0\n if len(body_right) > 0:\n body_right = 1\n else:\n body_right = 0\n if len(body_down) > 0:\n body_down = 1\n else:\n body_down = 0\n if len(body_left) > 0:\n body_left = 1\n else:\n body_left = 0\n\n # state: apple_up, apple_right, apple_down, apple_left, obstacle_up, obstacle_right, obstacle_down, obstacle_left, direction_up, direction_right, direction_down, direction_left\n state = [int(self.player.y < self.food.y_food), int(self.player.x < self.food.x_food), int(self.player.y > self.food.y_food),\n int(self.player.x > self.food.x_food), \\\n int(wall_up or body_up), int(wall_right or body_right), int(wall_down or body_down),\n int(wall_left or body_left), \\\n int(self.player.delta_x == 0 and self.player.delta_y< 0), int(self.player.delta_x >0 and self.player.delta_y == 0),\n int(self.player.delta_x ==0 and self.player.delta_y > 0), int(self.player.delta_x <0 and self.player.delta_y == 0)]\n\n return state\n\n def reset(self):\n self.player.reset()\n self.food.reset()\n self.score = 0\n self.crash = False\n\n\nclass Player:\n def __init__(self, game):\n self.game = game\n self.image = pygame.image.load(\"assets/green_square.jpg\")\n self.image = pygame.transform.scale(self.image, (game.block_size, game.block_size))\n self.head_image = pygame.image.load(\"assets/snake_head.png\")\n self.head_image = pygame.transform.scale(self.head_image, (game.block_size, game.block_size))\n # start in the center of the screen\n self.x = self.game.width // 2\n self.y = self.game.height // 2\n # mod by grid size so its grid aligned.\n self.x = self.x - self.x % self.game.block_size\n self.y = self.y - self.y % self.game.block_size\n self.prev_x = self.x\n self.prev_y = self.y\n\n self.position = deque()\n self.position.append([self.x, self.y])\n # self.position.extend(\n # [[self.x - 2 * game.block_size, self.y], [self.x - game.block_size, self.y], [self.x, self.y]])\n self.delta_x = self.game.block_size\n self.delta_y = 0\n self.food = 1\n self.eaten = False\n\n def reset(self):\n # start in the center of the screen\n self.x = self.game.width // 2\n self.y = self.game.height // 2\n # mod by grid size so its grid aligned.\n self.x = self.x - self.x % self.game.block_size\n self.y = self.y - self.y % self.game.block_size\n self.position = deque()\n self.position.append([self.x, self.y])\n\n self.delta_x = self.game.block_size\n self.delta_y = 0\n self.food = 1\n self.eaten = False\n\n def update_position(self, x, y):\n if x != self.position[-1][0] or y != self.position[-1][1]:\n if self.food > 1:\n self.position.append(self.position.popleft())\n\n self.position[-1][0] = x\n self.position[-1][1] = y\n\n def move(self, move, x, y, game, food):\n \"\"\"\n handle a move from the agent/player.\n :param move:\n :param x:\n :param y:\n :param game:\n :param food:\n :return:\n \"\"\"\n if self.eaten:\n self.position.append([x, y])\n self.eaten = False\n self.food += 1\n # check if we moved left or right. 
or no move.\n if np.array_equal(move, [0, 1, 0]) and self.delta_y == 0:\n # right - going horizontal\n self.delta_y = self.delta_x\n self.delta_x = 0\n elif np.array_equal(move, [0, 1, 0]) and self.delta_x == 0:\n # right - going vertical\n self.delta_x = -self.delta_y\n self.delta_y = 0\n elif np.array_equal(move, [0, 0, 1]) and self.delta_y == 0:\n # left - going horizontal\n self.delta_y = -self.delta_x\n self.delta_x = 0\n elif np.array_equal(move, [0, 0, 1]) and self.delta_x == 0:\n # left - going vertical\n self.delta_x = self.delta_y\n self.delta_y = 0\n\n self.prev_x = self.x\n self.prev_y = self.y\n\n self.x = x + self.delta_x\n self.y = y + self.delta_y\n\n if self.x < game.block_size or self.x > game.width - 2 * game.block_size \\\n or self.y < game.block_size \\\n or self.y > game.height - 2 * game.block_size \\\n or [self.x, self.y] in self.position:\n game.crash = True\n\n if self.x == food.x_food and self.y == food.y_food:\n food.next_food(game, self)\n self.eaten = True\n game.score = game.score + 1\n\n self.update_position(self.x, self.y)\n\n def update_state_cnn(self, state, game):\n if not game.crash:\n for i in range(self.food):\n x_temp, y_temp = self.position[len(self.position) - 1 - i]\n x_temp = x_temp // game.block_size * game.state_scale\n y_temp = y_temp // game.block_size * game.state_scale\n if i == 0:\n state[y_temp, x_temp,:] = [0,128,0]\n else:\n state[y_temp, x_temp,:] = [0,255,0]\n return state\n\n def display_player(self, game):\n # if not game.crash:\n for i in range(1,self.food):\n game.game_display.blit(self.image, self.position[len(self.position) - 1 - i])\n\n game.game_display.blit(self.head_image,self.position[-1])\n update_screen()\n # else:\n # pygame.time.wait(300)\n\n\nclass Food:\n def __init__(self, game):\n self.game = game\n self.image = pygame.image.load(\"assets/apple.png\")\n self.image = pygame.transform.scale(self.image, (game.block_size, game.block_size))\n # mod by grid size so its grid aligned.\n self.x_food = random.randint(game.block_size, game.width - 2 * game.block_size)\n self.x_food = self.x_food - self.x_food % game.block_size\n self.y_food = random.randint(game.block_size, game.height - 2 * game.block_size)\n self.y_food = self.y_food - self.y_food % game.block_size\n\n def reset(self):\n # mod by grid size so its grid aligned.\n self.x_food = random.randint(self.game.block_size, self.game.width - 2 * self.game.block_size)\n self.x_food = self.x_food - self.x_food % self.game.block_size\n self.y_food = random.randint(self.game.block_size, self.game.height - 2 * self.game.block_size)\n self.y_food = self.y_food - self.y_food % self.game.block_size\n\n def next_food(self, game, player):\n x_food= random.randint(game.block_size, game.width - 2 * game.block_size)\n x_food = x_food - x_food % game.block_size\n\n y_food = random.randint(game.block_size, game.height - 2 * game.block_size)\n y_food = y_food - y_food % game.block_size\n if [x_food, y_food] not in player.position and x_food != self.x_food and y_food != self.y_food:\n self.x_food = x_food\n self.y_food = y_food\n return self.x_food, self.y_food\n else:\n self.next_food(game, player)\n\n def display_food(self, game):\n game.game_display.blit(self.image, (self.x_food, self.y_food))\n update_screen()\n\n def update_state_cnn(self, state, game):\n x = self.x_food // game.block_size * game.state_scale\n y = self.y_food // game.block_size * game.state_scale\n state[y, x, :] = [0,0,255]\n return state\n\n\ndef run_game(speed):\n # Set options to activate or deactivate the game 
view, and its speed\n pygame.font.init()\n pygame.init()\n main = True\n game = Snake(500, 500, 20)\n player = game.player\n food = game.food\n record = 0\n # init move\n player.move([1, 0, 0], player.x, player.y, game, food)\n display(player, food, game, record)\n while main:\n move = [1, 0, 0]\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n main = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n move = [0, 0, 1]\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n move = [0, 1, 0]\n\n if event.type == pygame.KEYUP:\n if event.key == ord('q'):\n pygame.quit()\n sys.exit()\n main = False\n if event.key == ord('r'):\n game.reset()\n\n record = get_record(game.score, record)\n player.move(move, player.x, player.y, game, food)\n if not game.crash:\n img = display(player, food, game, record)\n pygame.time.wait(speed)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--speed\", type=int, default=1)\n args = parser.parse_args()\n run_game(100 - args.speed)\n" ]
[ [ "numpy.array", "numpy.sqrt", "numpy.array_equal", "numpy.ones" ] ]
mjw99/Musketeer
[ "0299a7974ad90c09d8d9206fcf862e45f9fddf30" ]
[ "musketeer/equilibriumConstants.py" ]
[ "import tkinter as tk\n\nimport numpy as np\n\nfrom . import moduleFrame\n\n\nclass GetKsCustom:\n    def __init__(self, titration):\n        self.titration = titration\n        popup = tk.Toplevel()\n        popup.title(\"Edit equilibrium constant values\")\n        popup.grab_set()\n        # TODO: implement\n\n    def __call__(self, kVars):\n        kVars = np.insert(kVars, 0, 1)\n        return self.titration.ksMatrix @ kVars\n\n\nclass GetKsAll:\n    # when every equilibrium constant is unknown and independent\n    def __init__(self, titration):\n        self.titration = titration\n        titration.ksMatrix = np.identity(titration.boundCount)\n\n    def __call__(self, kVars):\n        return kVars\n\n\nclass ModuleFrame(moduleFrame.ModuleFrame):\n    frameLabel = \"Equilibrium constants\"\n    dropdownLabelText = \"Which Ks to optimise?\"\n    dropdownOptions = {\n        \"Optimise all Ks\": GetKsAll,\n        \"Custom\": GetKsCustom\n    }\n    attributeName = \"getKs\"\n" ]
[ [ "numpy.identity", "numpy.insert" ] ]
eposs/solution_scattering
[ "c9e1570cdc7ad0b5b9303770e798bd0bb71650c3" ]
[ "quickplots.py" ]
[ "import glob\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom saxs_plots import real_space_plotter\n\ndats = glob.glob('./*dat')\ndiffs = [item for item in dats if \"diff\" in item]\nsums = [item for item in dats if \"sum\" in item]\navgs = [item for item in dats if \"diff\" not in item]\nspfs = [item for item in dats if \"spf\" in item]\n\n\nlow = [item for item in diffs if \"3C\" in item]\nmed = [item for item in diffs if \"11C\" in item]\nhigh = [item for item in diffs if \"19C\" in item]\n\npc0s = [item for item in dats if \"PC0\" in item]\n\n\nsubselection = [\"-10.1us\", \"562ns\", \"750ns\", \"1us\", \"1.33us\", \"1.78us\", \"2.37us\", \"3.16us\", \"4.22us\", \"5.62us\"]\n\n\n\nDATAA = []\nlabels = []\nii = -1\nfor item in spfs:\n # for sub in subselection:\n # if sub in item:\n # d1, samp, temp, dtype,d5,d6,d7 = item.split('_')\n # labels.append(temp)\n if ii < 0:\n ii=0\n nii = ii\n else:\n N = plt.cm.inferno.N\n ii += int(N/len(subselection))\n nii = N-ii \n data = pd.read_table(item,skiprows=1,names=['q','SA','sigSA'],delim_whitespace=True,engine='python')\n DATAA.append(data)\n labels.append(item)\n# plt.plot(data.q,data.I,label=item.split('_')[-1].replace('.dat',''),color=plt.cm.inferno(nii))\n# plt.legend()\n# plt.xscale('log')\n# plt.show()\n\nreal_space_plotter(DATAA, name='output', labels=labels)\n" ]
[ [ "pandas.read_table" ] ]
qin-yu/elf
[ "bb8e0a41c1c2539ac6f866271751139271fbeeb1" ]
[ "elf/parallel/operations.py" ]
[ "import multiprocessing\n# would be nice to use dask for all of this instead of concurrent.futures\n# so that this could be used on a cluster as well\nfrom concurrent import futures\nfrom numbers import Number\nfrom functools import partial\nfrom tqdm import tqdm\n\nfrom .common import get_blocking\nfrom ..util import set_numpy_threads\nset_numpy_threads(1)\nimport numpy as np\n\n\ndef _compute_broadcast(shapex, shapey):\n    broadcast = []\n    for shx, shy in zip(shapex, shapey):\n        if shx == shy:\n            broadcast.append(False)\n        elif shy == 1:\n            broadcast.append(True)\n        else:\n            raise ValueError(\"Cannot broadcast shapes %s and %s\" % (str(shapex), str(shapey)))\n    return broadcast\n\n\ndef isin(x, y, out=None,\n         block_shape=None, n_threads=None,\n         mask=None, verbose=False, roi=None):\n    \"\"\" Compute np.isin in parallel.\n\n    Arguments:\n        x [array_like] - operand 1, numpy array or similar like h5py or zarr dataset\n        y [array_like or scalar] - operand 2, scalar, numpy array or list\n        out [array_like] - output, by default the operation\n            is done inplace in the first operand (default: None)\n        block_shape [tuple] - shape of the blocks used for parallelisation,\n            by default chunks of the input will be used, if available (default: None)\n        n_threads [int] - number of threads, by default all are used (default: None)\n        mask [array_like] - mask to exclude data from the computation (default: None)\n        verbose [bool] - verbosity flag (default: False)\n        roi [tuple[slice]] - region of interest for this computation (default: None)\n    Returns:\n        array_like - output\n    \"\"\"\n\n    # check the mask if given\n    if mask is not None and mask.shape != x.shape:\n        raise ValueError(\"Invalid mask shape, got %s, expected %s (= shape of first operand)\" % (str(mask.shape),\n                                                                                                 str(x.shape)))\n\n    if out is None:\n        out = x\n    elif x.shape != out.shape:\n        raise ValueError(\"Expect x and out of same shape, got %s and %s\" % (str(x.shape),\n                                                                            str(out.shape)))\n\n    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads\n    blocking = get_blocking(x, block_shape, roi)\n    n_blocks = blocking.numberOfBlocks\n\n    def _isin(block_id):\n        block = blocking.getBlock(block_id)\n        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))\n\n        # check if we have a mask and if we do if we\n        # have pixels in the mask\n        if mask is not None:\n            m = mask[bb].astype('bool')\n            if m.sum() == 0:\n                return None\n\n        # load the data and apply the mask if given\n        xx = x[bb]\n        if mask is None:\n            xx = np.isin(xx, y)\n        else:\n            xx[m] = np.isin(xx[m], y)\n        out[bb] = xx\n\n    with futures.ThreadPoolExecutor(n_threads) as tp:\n        if verbose:\n            list(tqdm(tp.map(_isin, range(n_blocks)), total=n_blocks))\n        else:\n            tp.map(_isin, range(n_blocks))\n\n    return out\n\n\ndef apply_operation(x, y, operation, out=None,\n                    block_shape=None, n_threads=None,\n                    mask=None, verbose=False, roi=None):\n    \"\"\" Apply operation to two operands in parallel.\n\n    Arguments:\n        x [array_like] - operand 1, numpy array or similar like h5py or zarr dataset\n        y [array_like or scalar] - operand 2, numpy array or similar like h5py or zarr dataset\n            or scalar\n        operation [callable] - operation applied to the two operands\n        out [array_like] - output, by default the operation\n            is done inplace in the first operand (default: None)\n        block_shape [tuple] - shape of the blocks used for parallelisation,\n            by default chunks of the input will be used, if available (default: None)\n        n_threads [int] - number of threads, by default all are used (default: None)\n        mask [array_like] - mask to 
exclude data from the computation (default: None)\n        verbose [bool] - verbosity flag (default: False)\n        roi [tuple[slice]] - region of interest for this computation (default: None)\n    Returns:\n        array_like - output\n    \"\"\"\n\n    # check type and dimension of the second operand and check if we need to broadcast\n    scalar_operand = isinstance(y, Number)\n    if scalar_operand:\n        broadcast = False\n    else:\n        # TODO we need to check for array_like here to also allow h5py, z5py, etc.\n        if not isinstance(y, np.ndarray):\n            raise ValueError(\"Expected second operand to be scalar or numpy array, got %s\" % type(y))\n        # check that the dimensions of the operands match\n        if x.ndim != y.ndim:\n            raise ValueError(\"Dimensions of operands do not match: %i, %i\" % (x.ndim, y.ndim))\n        # if the shapes disagree, check if we can broadcast\n        broadcast = False if x.shape == y.shape else _compute_broadcast(x.shape, y.shape)\n\n    # broadcasting and masking is not supported yet\n    if mask is not None and broadcast:\n        raise NotImplementedError(\"Broadcasting and masking is not implemented yet\")\n\n    # check the mask if given\n    if mask is not None and mask.shape != x.shape:\n        raise ValueError(\"Invalid mask shape, got %s, expected %s (= shape of first operand)\" % (str(mask.shape),\n                                                                                                 str(x.shape)))\n\n    if out is None:\n        out = x\n    elif x.shape != out.shape:\n        raise ValueError(\"Expect x and out of same shape, got %s and %s\" % (str(x.shape),\n                                                                            str(out.shape)))\n\n    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads\n    blocking = get_blocking(x, block_shape, roi)\n    n_blocks = blocking.numberOfBlocks\n\n    def _apply_scalar(block_id):\n        block = blocking.getBlock(block_id)\n        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))\n\n        # check if we have a mask and if we do if we\n        # have pixels in the mask\n        if mask is not None:\n            m = mask[bb].astype('bool')\n            if m.sum() == 0:\n                return None\n\n        # load the data and apply the mask if given\n        xx = x[bb]\n        if mask is None:\n            xx = operation(xx, y)\n        else:\n            xx[m] = operation(xx[m], y)\n        out[bb] = xx\n\n    def _apply_array(block_id):\n        block = blocking.getBlock(block_id)\n        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))\n        # change the bounding boxes if inputs need to be broadcast\n        if broadcast:\n            bby = tuple(slice(None) if bcast else b for bcast, b in zip(broadcast, bb))\n        else:\n            bby = bb\n\n        # check if we have a mask and if we do if we\n        # have pixels in the mask\n        if mask is not None:\n            m = mask[bb].astype('bool')\n            if m.sum() == 0:\n                return None\n\n        # load the data and apply the mask if given\n        xx = x[bb]\n        yy = y[bby]\n        if mask is None:\n            xx = operation(xx, yy)\n        else:\n            xx[m] = operation(xx[m], yy[m])\n        out[bb] = xx\n\n    _apply = _apply_scalar if scalar_operand else _apply_array\n    with futures.ThreadPoolExecutor(n_threads) as tp:\n        if verbose:\n            list(tqdm(tp.map(_apply, range(n_blocks)), total=n_blocks))\n        else:\n            tp.map(_apply, range(n_blocks))\n\n    return out\n\n\ndef apply_operation_single(x, operation, axis=None, out=None,\n                           block_shape=None, n_threads=None,\n                           mask=None, verbose=False, roi=None):\n    \"\"\" Apply operation to single operand in parallel.\n\n    Arguments:\n        x [array_like] - operand 1, numpy array or similar like h5py or zarr dataset\n        operation [callable] - operation applied to the operand\n        axis [int] - axis along which to apply the operation (default: None)\n        out [array_like] - output, by default the operation\n            is done inplace in the first operand (default: None)\n        block_shape [tuple] - shape of the blocks used 
for parallelisation,\n by default chunks of the input will be used, if available (default: None)\n n_threads [int] - number of threads, by default all are used (default: None)\n mask [array_like] - mask to exclude data from the computation (default: None)\n verbose [bool] - verbosity flag (default: False)\n roi [tuple[slice]] - region of interest for this computation (default: None)\n Returns:\n array_like - output\n \"\"\"\n\n shape = x.shape\n if axis is not None:\n operation = partial(operation, axis=axis)\n shape = tuple(sh for ii, sh in enumerate(shape) if ii != axis)\n\n # check the mask if given\n if mask is not None and mask.shape != shape:\n raise ValueError(\"Invalid mask shape, got %s, expected %s (= shape of first operand)\" % (str(mask.shape),\n str(shape)))\n # if no output is given, apply this operation inplace\n if out is None:\n out = x\n\n # check the shape against the output shape\n if shape != out.shape:\n raise ValueError(\"Expect x and out of same shape, got %s and %s\" % (str(shape), str(out.shape)))\n\n n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads\n blocking = get_blocking(out, block_shape, roi)\n n_blocks = blocking.numberOfBlocks\n\n def _apply(block_id):\n block = blocking.getBlock(block_id)\n bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))\n\n # check if we have a mask and if we do if we\n # have pixels in the mask\n if mask is not None:\n m = mask[bb].astype('bool')\n if m.sum() == 0:\n return None\n\n if axis is None:\n bb_in = bb\n else:\n bb_in = bb[:axis] + (slice(None),) + bb[axis:]\n\n # load the data and apply the mask if given\n xx = operation(x[bb_in])\n if mask is not None:\n xx[m] = 0\n out[bb] = xx\n\n with futures.ThreadPoolExecutor(n_threads) as tp:\n if verbose:\n list(tqdm(tp.map(_apply, range(n_blocks)), total=n_blocks))\n else:\n tp.map(_apply, range(n_blocks))\n\n return out\n\n\n# helper function to autogenerate parallel impls of common numpy operations\ndef _generate_operation(op_name):\n\n doc_str =\\\n \"\"\"Apply np.%s block-wise and in parallel.\n\n Arguments:\n x [array_like] - operand 1, numpy array or similar like h5py or zarr dataset\n y [array_like or scalar] - operand 2, numpy array, h5py or zarr dataset or scalar\n out [array_like] - output, by default the operation\n is done inplace in the first operand (default: None)\n block_shape [tuple] - shape of the blocks used for parallelisation,\n by default chunks of the input will be used, if available (default: None)\n n_threads [int] - number of threads, by default all are used (default: None)\n mask [array_like] - mask to exclude data from the computation (default: None)\n verbose [bool] - verbosity flag (default: False)\n roi [tuple[slice]] - region of interest for this computation (default: None)\n Returns:\n array_like - output\n \"\"\" % op_name\n\n def op(x, y, out=None, block_shape=None, n_threads=None,\n mask=None, verbose=False, roi=None):\n return apply_operation(x, y, getattr(np, op_name), block_shape=block_shape,\n n_threads=n_threads, mask=mask, verbose=verbose,\n out=out, roi=roi)\n\n op.__doc__ = doc_str\n op.__name__ = op_name\n globals()[op_name] = op\n\n\n# autogenerate parallel implementation for common numpy operations\n_op_names = ['add', 'subtract', 'multiply', 'divide',\n 'greater', 'greater_equal', 'less', 'less_equal',\n 'minimum', 'maximum']\n\n\nfor op_name in _op_names:\n _generate_operation(op_name)\n\ndel _generate_operation\ndel _op_names\n\n\n# TODO autogenerate parallel implementation for common 
single operand numpy operations\n# _op_names = ['mean', 'max', 'min', 'std']\n" ]
[ [ "numpy.isin" ] ]
igorperic17/object_detection_tf_example
[ "4d79eb45f5cf05af51e1055f72e4226dfa0f3538" ]
[ "benchmark.py" ]
[ "import tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\ntf.enable_v2_behavior()\n\nfrom tensorflow.python.framework.ops import disable_eager_execution\ndisable_eager_execution()\n\nfrom tensorflow.python.compiler.mlcompute import mlcompute\nmlcompute.set_mlc_device(device_name='gpu')\n\n\n(ds_train, ds_test), ds_info = tfds.load(\n 'mnist',\n split=['train', 'test'],\n shuffle_files=True,\n as_supervised=True,\n with_info=True,\n)\n\ndef normalize_img(image, label):\n \"\"\"Normalizes images: `uint8` -> `float32`.\"\"\"\n return tf.cast(image, tf.float32) / 255., label\n\nbatch_size = 128\n\nds_train = ds_train.map(\n normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\nds_train = ds_train.cache()\nds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)\nds_train = ds_train.batch(batch_size)\nds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)\n\n\nds_test = ds_test.map(\n normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\nds_test = ds_test.batch(batch_size)\nds_test = ds_test.cache()\nds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)\n\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3),\n activation='relu'),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3),\n activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n# tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n# tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='softmax')\n])\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=tf.keras.optimizers.Adam(0.001),\n metrics=['accuracy'],\n)\n\nmodel.fit(\n ds_train,\n epochs=12,\n validation_data=ds_test,\n)" ]
[ [ "tensorflow.compat.v2.keras.layers.Flatten", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.keras.layers.MaxPooling2D", "tensorflow.python.compiler.mlcompute.mlcompute.set_mlc_device", "tensorflow.compat.v2.keras.layers.Dense", "tensorflow.compat.v2.keras.optimizers.Adam", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.keras.layers.Conv2D", "tensorflow.python.framework.ops.disable_eager_execution" ] ]
kasimte/RLs
[ "0eba84bd7cc571269f874b65923bec2188828ef6" ]
[ "gym_wrapper.py" ]
[ "import gym\r\nimport numpy as np\r\nimport threading\r\n\r\n\r\nclass MyThread(threading.Thread):\r\n\r\n def __init__(self, func, args=()):\r\n super().__init__()\r\n self.func = func\r\n self.args = args\r\n\r\n def run(self):\r\n self.result = self.func(*self.args)\r\n\r\n def get_result(self):\r\n try:\r\n return self.result\r\n except Exception:\r\n return None\r\n\r\n\r\nclass gym_envs(object):\r\n\r\n def __init__(self, gym_env_name, n):\r\n self.n = n\r\n self.envs = [gym.make(gym_env_name) for _ in range(self.n)]\r\n self.observation_space = self.envs[0].observation_space\r\n self.obs_type = 'visual' if len(self.observation_space.shape) == 3 else 'vector'\r\n self.reward_threshold = self.envs[0].env.spec.reward_threshold\r\n if type(self.envs[0].action_space) == gym.spaces.box.Box:\r\n self.a_type = 'continuous'\r\n elif type(self.envs[0].action_space) == gym.spaces.tuple.Tuple:\r\n self.a_type = 'Tuple(Discrete)'\r\n else:\r\n self.a_type = 'discrete'\r\n self.action_space = self.envs[0].action_space\r\n\r\n def render(self):\r\n self.envs[0].render()\r\n\r\n def close(self):\r\n [env.close() for env in self.envs]\r\n \r\n def sample_action(self):\r\n return np.array([env.action_space.sample() for env in self.envs])\r\n\r\n def reset(self):\r\n self.dones_index = []\r\n threadpool = []\r\n for i in range(self.n):\r\n th = MyThread(self.envs[i].reset, args=())\r\n threadpool.append(th)\r\n for th in threadpool:\r\n th.start()\r\n for th in threadpool:\r\n threading.Thread.join(th)\r\n if self.obs_type == 'visual':\r\n return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.n)])\r\n else:\r\n return np.array([threadpool[i].get_result() for i in range(self.n)])\r\n\r\n def step(self, actions):\r\n if self.a_type == 'discrete':\r\n actions = actions.reshape(-1,)\r\n elif self.a_type == 'Tuple(Discrete)':\r\n actions = actions.reshape(self.n, -1).tolist()\r\n threadpool = []\r\n for i in range(self.n):\r\n th = MyThread(self.envs[i].step, args=(actions[i], ))\r\n threadpool.append(th)\r\n for th in threadpool:\r\n th.start()\r\n for th in threadpool:\r\n threading.Thread.join(th)\r\n if self.obs_type == 'visual':\r\n results = [\r\n [threadpool[i].get_result()[0][np.newaxis, :], *threadpool[i].get_result()[1:]]\r\n for i in range(self.n)]\r\n else:\r\n results = [threadpool[i].get_result() for i in range(self.n)]\r\n obs, reward, done, info = [np.array(e) for e in zip(*results)]\r\n self.dones_index = np.where(done)[0]\r\n return obs, reward, done, info\r\n \r\n def patial_reset(self):\r\n threadpool = []\r\n for i in self.dones_index:\r\n th = MyThread(self.envs[i].reset, args=())\r\n threadpool.append(th)\r\n for th in threadpool:\r\n th.start()\r\n for th in threadpool:\r\n threading.Thread.join(th)\r\n if self.obs_type == 'visual':\r\n return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.dones_index.shape[0])])\r\n else:\r\n return np.array([threadpool[i].get_result() for i in range(self.dones_index.shape[0])])\r\n\r\n" ]
[ [ "numpy.array", "numpy.where" ] ]
asmaalrawi/geopm
[ "e93548dfdd693a17c81163787ba467891937356d" ]
[ "integration/experiment/power_sweep/gen_plot_node_efficiency.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n#     * Redistributions of source code must retain the above copyright\n#       notice, this list of conditions and the following disclaimer.\n#\n#     * Redistributions in binary form must reproduce the above copyright\n#       notice, this list of conditions and the following disclaimer in\n#       the documentation and/or other materials provided with the\n#       distribution.\n#\n#     * Neither the name of Intel Corporation nor the names of its\n#       contributors may be used to endorse or promote products derived\n#       from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\n'''\nGenerate per-node histograms of achieved frequency from the reports\nof a power sweep experiment.\n'''\n\nimport sys\nimport os\nimport pandas\nimport matplotlib.pyplot as plt\nimport argparse\n\nimport geopmpy.io\n\nfrom experiment import common_args\nfrom experiment import machine\n\n\ndef generate_histogram(data, app_name, min_drop, max_drop, label, bin_size,\n                       xprecision, output_dir):\n    data = data[label]\n    fontsize = 12\n    fig_size = (8, 4)\n    verbose = True\n\n    if label.lower() == 'power':\n        axis_units = 'W'\n        title_units = 'W'\n        range_factor = 1\n        title = '{}: Histogram of Power (No Capping)'.format(app_name)\n        bar_color = 'red'\n    elif label.lower() == 'frequency':\n        axis_units = 'GHz'\n        title_units = 'MHz'\n        range_factor = 1000\n        title = '{} Histogram of Achieved Frequency'.format(app_name)\n        bar_color = 'blue'\n    elif label.lower() == 'energy':\n        axis_units = 'J'\n        title_units = 'J'\n        range_factor = 1\n        title = '{} Histogram of Energy'.format(app_name)\n        bar_color = 'cyan'\n    else:\n        raise RuntimeError(\"<geopmpy>: Unknown type for histogram: {}\".format(label))\n\n    plt.figure(figsize=fig_size)\n    bins = [round(bb*bin_size, 3) for bb in range(int(min_drop/bin_size), int(max_drop/bin_size)+2)]\n    n, bins, patches = plt.hist(data, rwidth=0.8, bins=bins, color=bar_color)\n    for n, b in zip(n, bins):\n        plt.annotate(int(n) if int(n) != 0 else \"\", xy=(b+bin_size/2.0, n+2.5),\n                     horizontalalignment='center',\n                     fontsize=fontsize-4)\n    min_max_range = (max(data) - min(data)) * range_factor\n    mean = data.mean() * range_factor\n\n    n = len(data)\n    trim_pct = 0.05\n    trimmed_data = data[int(n*trim_pct):n-int(trim_pct*n)]\n    trimmed_min_max = (max(trimmed_data) - min(trimmed_data)) * range_factor\n    plt.title('{}\\nMin-max Var.: {} {}; {}% Min-max Var.: {} {}; Mean: {} {}'\n              .format(title, round(min_max_range, 3), title_units,\n                      int((trim_pct)*100), round(trimmed_min_max, 3), title_units,\n                      round(mean, 3), title_units),\n              fontsize=fontsize)\n    plt.xlabel('{} 
({})'.format(label.title(), axis_units), fontsize=fontsize)\n plt.ylabel('Count', fontsize=fontsize)\n plt.xticks([b+bin_size/2.0 for b in bins],\n [' [{start:.{prec}f}, {end:.{prec}f})'.format(start=b, end=b+bin_size, prec=xprecision) for b in bins],\n rotation='vertical',\n fontsize=fontsize-4)\n _, ylabels = plt.yticks()\n plt.setp(ylabels, fontsize=fontsize-4)\n\n plt.margins(0.02, 0.2)\n plt.axis('tight')\n\n plt.tight_layout()\n fig_dir = os.path.join(output_dir, 'figures')\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir)\n\n filename = '{}_{}_histo'.format(app_name.lower().replace('@', '_').replace(' ', '_'), label)\n # todo: could be a method\n for ext in ['png']:\n full_path = os.path.join(fig_dir, '{}.{}'.format(filename, ext))\n plt.savefig(full_path)\n if verbose:\n sys.stdout.write(' {}\\n'.format(full_path))\n plt.close()\n\n\ndef achieved_freq_histogram_package(app_name, output_dir, report_df, detailed=False):\n # min and max are used to create consistent x-axis limits\n # sticker frequency is used when converting the percent-of-sticker to\n # a value in hertz\n mach = machine.get_machine(output_dir)\n min_freq = mach.frequency_min()\n max_freq = mach.frequency_max()\n sticker_freq = mach.frequency_sticker()\n step_freq = mach.frequency_step()\n\n report_df['power_limit'] = report_df['POWER_PACKAGE_LIMIT_TOTAL']\n\n temp_df = report_df.copy()\n report_df['frequency'] = report_df['CYCLES_THREAD@package-0'] / report_df['CYCLES_REFERENCE@package-0']\n temp_df['frequency'] = temp_df['CYCLES_THREAD@package-1'] / temp_df['CYCLES_REFERENCE@package-1']\n report_df['freq_package'] = 0\n temp_df['freq_package'] = 1\n report_df.set_index(['power_limit', 'host', 'freq_package'], inplace=True)\n temp_df.set_index(['power_limit', 'host', 'freq_package'], inplace=True)\n report_df = report_df.append(temp_df)\n # convert percent to GHz frequency based on sticker\n report_df['frequency'] *= sticker_freq / 1e9\n\n profiles = report_df['POWER_PACKAGE_LIMIT_TOTAL'].unique()\n power_caps = sorted(profiles) # list(range(self._min_power, self._max_power+1, self._step_power))\n gov_freq_data = {}\n bal_freq_data = {}\n for target_power in power_caps:\n governor_data = report_df.loc[report_df[\"Agent\"] == \"power_governor\"]\n governor_data = governor_data.loc[governor_data['POWER_PACKAGE_LIMIT_TOTAL'] == target_power]\n gov_freq_data[target_power] = governor_data.groupby(['host', 'freq_package']).mean()['frequency'].sort_values()\n gov_freq_data[target_power] = pandas.DataFrame(gov_freq_data[target_power])\n if detailed:\n sys.stdout.write('Governor data @ {}W:\\n{}\\n'.format(target_power, gov_freq_data[target_power]))\n\n balancer_data = report_df.loc[report_df[\"Agent\"] == \"power_balancer\"]\n balancer_data = balancer_data.loc[balancer_data['POWER_PACKAGE_LIMIT_TOTAL'] == target_power]\n bal_freq_data[target_power] = balancer_data.groupby(['host', 'freq_package']).mean()['frequency'].sort_values()\n bal_freq_data[target_power] = pandas.DataFrame(bal_freq_data[target_power])\n if detailed:\n sys.stdout.write('Balancer data @ {}W:\\n{}\\n'.format(target_power, bal_freq_data[target_power]))\n\n # plot histograms\n min_drop = min_freq / 1e9\n max_drop = (sticker_freq + step_freq) / 1e9\n #max_drop = (max_freq - step_freq) / 1e9 # turbo range\n bin_size = step_freq / 1e9 / 2.0\n for target_power in power_caps:\n gov_data = gov_freq_data[target_power]\n bal_data = bal_freq_data[target_power]\n\n name = app_name + \"@\" + str(target_power) + \"W Governor\"\n generate_histogram(gov_data, 
name, min_drop, max_drop, 'frequency',\n bin_size, 3, output_dir)\n name = app_name + \"@\" + str(target_power) + \"W Balancer\"\n generate_histogram(bal_data, name, min_drop, max_drop, 'frequency',\n bin_size, 3, output_dir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n common_args.add_output_dir(parser)\n common_args.add_show_details(parser)\n common_args.add_label(parser)\n args = parser.parse_args()\n\n output_dir = args.output_dir\n show_details = args.show_details\n label = args.label\n output = geopmpy.io.RawReportCollection(\"*report\", dir_name=output_dir)\n achieved_freq_histogram_package(app_name=label,\n output_dir=output_dir,\n report_df=output.get_epoch_df(),\n detailed=show_details)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.figure", "matplotlib.pyplot.margins", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.setp", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.yticks", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
googleinterns/smart-content-summary
[ "595c8e2cb0e160a87cacb954a2a030953fdce6c5" ]
[ "classifier/run_classifier_utils.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for building a LaserTagger TF model.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom bert import modeling, optimization\nimport tensorflow as tf\n\n\nclass LaserTaggerConfig(modeling.BertConfig):\n \"\"\"Model configuration for LaserTagger.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Initializes an instance of LaserTagger configuration.\n\n This initializer expects BERT specific arguments.\n \"\"\"\n super(LaserTaggerConfig, self).__init__(**kwargs)\n\n\nclass ModelFnBuilder(object):\n \"\"\"Class for building `model_fn` closure for TPUEstimator.\"\"\"\n def __init__(self, config, num_categories, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, max_seq_length, classifier_type):\n \"\"\"Initializes an instance of a LaserTagger model.\n\n Args:\n config: LaserTagger model configuration.\n num_tags: Number of different tags to be predicted.\n init_checkpoint: Path to a pretrained BERT checkpoint (optional).\n learning_rate: Learning rate.\n num_train_steps: Number of training steps.\n num_warmup_steps: Number of warmup steps.\n use_tpu: Whether to use TPU.\n use_one_hot_embeddings: Whether to use one-hot embeddings for word\n embeddings.\n max_seq_length: Maximum sequence length.\n classifier_type: Either Grammar or Meaning.\n \"\"\"\n self._config = config\n self._num_categories = num_categories\n self._init_checkpoint = init_checkpoint\n self._learning_rate = learning_rate\n self._num_train_steps = num_train_steps\n self._num_warmup_steps = num_warmup_steps\n self._use_tpu = use_tpu\n self._use_one_hot_embeddings = use_one_hot_embeddings\n self._max_seq_length = max_seq_length\n self._classifier_type = classifier_type\n\n def _create_model(self, mode, input_ids_source, input_mask_source,\n segment_ids_source, input_ids_summary, input_mask_summary,\n segment_ids_summary, labels):\n \"\"\"Creates a LaserTagger model.\"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model_source = modeling.BertModel(\n config=self._config,\n is_training=is_training,\n input_ids=input_ids_source,\n input_mask=input_mask_source,\n token_type_ids=segment_ids_source,\n use_one_hot_embeddings=self._use_one_hot_embeddings)\n final_hidden_source = model_source.get_sequence_output()\n\n if self._classifier_type == \"Meaning\":\n model_summary = modeling.BertModel(\n config=self._config,\n is_training=is_training,\n input_ids=input_ids_summary,\n input_mask=input_mask_summary,\n token_type_ids=segment_ids_summary,\n use_one_hot_embeddings=self._use_one_hot_embeddings)\n final_hidden_summary = model_source.get_sequence_output()\n\n final_hidden = tf.concat([final_hidden_source, final_hidden_summary],\n axis=1)\n else:\n final_hidden = final_hidden_source\n\n if is_training:\n # I.e., 0.1 dropout\n final_hidden = tf.nn.dropout(final_hidden, keep_prob=0.9)\n\n layer1_output = tf.layers.dense(\n final_hidden,\n 1,\n 
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n name=\"layer1\")\n\n if self._classifier_type == \"Meaning\":\n flattened_layer1_output = tf.reshape(layer1_output,\n [-1, self._max_seq_length * 2])\n else:\n flattened_layer1_output = tf.reshape(layer1_output,\n [-1, self._max_seq_length])\n logits = tf.expand_dims(\n tf.layers.dense(\n flattened_layer1_output,\n self._num_categories,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n name=\"layer2\"), 1)\n\n with tf.variable_scope(\"loss\"):\n loss = None\n per_example_loss = None\n if mode != tf.estimator.ModeKeys.PREDICT:\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits)\n loss = tf.reduce_mean(per_example_loss)\n pred = tf.cast(tf.argmax(logits, axis=-1), tf.int32)\n else:\n pred = tf.cast(tf.argmax(logits, axis=-1), tf.int32)\n\n return (loss, per_example_loss, pred)\n\n def build(self):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\", name, features[name].shape)\n\n if self._classifier_type == \"Meaning\":\n input_ids_source = features[\"input_ids_source\"]\n input_mask_source = features[\"input_mask_source\"]\n segment_ids_source = features[\"segment_ids_source\"]\n\n input_ids_summary = features[\"input_ids_summary\"]\n input_mask_summary = features[\"input_mask_summary\"]\n segment_ids_summary = features[\"segment_ids_summary\"]\n elif self._classifier_type == \"Grammar\":\n input_ids_source = features[\"input_ids\"]\n input_mask_source = features[\"input_mask\"]\n segment_ids_source = features[\"segment_ids\"]\n\n input_ids_summary = None\n input_mask_summary = None\n segment_ids_summary = None\n else:\n raise ValueError(\"Classification type must be Grammar or Meaning\")\n\n labels = None\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = features[\"labels\"]\n\n (total_loss, per_example_loss, predictions) = self._create_model(\n mode, input_ids_source, input_mask_source, segment_ids_source,\n input_ids_summary, input_mask_summary, segment_ids_summary, labels)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if self._init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(\n tvars, self._init_checkpoint)\n if self._use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(self._init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(self._init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n tf.logging.info(\"Initializing the model from: %s\",\n self._init_checkpoint)\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n self._learning_rate,\n self._num_train_steps,\n self._num_warmup_steps,\n self._use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def 
metric_fn(per_example_loss, labels, labels_mask, predictions):\n \"\"\"Compute eval metrics.\"\"\"\n accuracy = tf.cast(\n tf.reduce_all(tf.logical_or(tf.equal(labels, predictions),\n ~tf.cast(labels_mask, tf.bool)),\n axis=1), tf.float32)\n return {\n # This is equal to the Exact score if the final realization step\n # doesn't introduce errors.\n \"sentence_level_acc\": tf.metrics.mean(accuracy),\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n }\n\n # No mask feature is produced by this classifier's input pipeline, so\n # treat every position as valid; the accuracy then reduces to a plain\n # label/prediction match.\n labels_mask = tf.ones_like(labels)\n eval_metrics = (metric_fn,\n [per_example_loss, labels, labels_mask, predictions])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"pred\": predictions},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n" ]
[ [ "tensorflow.metrics.mean", "tensorflow.train.Scaffold", "tensorflow.concat", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.train.init_from_checkpoint", "tensorflow.truncated_normal_initializer", "tensorflow.trainable_variables", "tensorflow.equal", "tensorflow.cast", "tensorflow.logging.info", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.variable_scope", "tensorflow.argmax", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.nn.dropout" ] ]
wyli/cpp-py-example
[ "a5a09f1a5d93565dadb082aefa2807e1d157187f" ]
[ "ex/gauss.py" ]
[ "#!/usr/bin/env python3\n\nfrom pymycpp import Bitmap\nimport scipy.ndimage as ndi\n\n\nif __name__ == '__main__':\n img = Bitmap('baboon.bmp')\n\n data_np = img.data()\n\n ndi.gaussian_filter(input=data_np,\n sigma=(5, 15, 0),\n order=0,\n output=data_np)\n\n img.save()\n" ]
[ [ "scipy.ndimage.gaussian_filter" ] ]
simpsus/fmp_python
[ "858e3ff276aa24da77d242c0d72c7ec2a91ac875" ]
[ "fmp_python/common/fmpdecorator.py" ]
[ "import os\nimport functools\nimport pandas as pd\nimport inspect\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom fmp_python.common.fmpexception import FMPException\n\n\nclass FMPDecorator():\n\n @classmethod\n def inject_api_key(cls,func):\n @functools.wraps(func)\n def deco_function(self, *args, **kwargs):\n api_key = self.api_key\n request = func(self, *args, **kwargs)\n if '?' not in request:\n return request+'?apikey='+api_key\n else:\n return request+'&apikey='+api_key\n return deco_function\n \n @classmethod\n def format_data(cls,func):\n @functools.wraps(func)\n def _call_wrapper(self, *args, **kwargs):\n response = func(self, *args, **kwargs)\n if self.output_format=='json':\n return response.json()\n elif self.output_format=='pandas':\n df = pd.DataFrame()\n try: \n df = pd.DataFrame(response.json()) \n except ValueError as ve:\n pass #logger.error('{}',ve)\n return df \n else:\n raise FMPException(\"Output must be either pandas or json\",FMPDecorator.format_data.__name__) \n\n return _call_wrapper\n @classmethod\n def format_historical_data(cls,func):\n @functools.wraps(func)\n def _call_wrapper(self, *args, **kwargs):\n response = func(self, *args, **kwargs)\n if self.output_format=='json':\n return response.json()['historical']\n elif self.output_format=='pandas':\n return pd.DataFrame(response.json()['historical']) \n else:\n raise FMPException(\"Output must be either pandas or json\",FMPDecorator.format_historical_data.__name__) \n\n return _call_wrapper\n\n @classmethod\n def write_to_file(cls,func):\n @functools.wraps(func)\n def _call_wrapper(self, *args, **kwargs):\n response = func(self, *args, **kwargs)\n if self.write_to_file:\n category = func.__name__.replace('get_','')\n symbol = args[len(args)-1]\n fullname = FMPDecorator.__build_output_tree(symbol, category)\n if self.output_format == 'json':\n pd.DataFrame(response).to_excel(fullname)\n else:\n response.to_excel(fullname)\n return response\n return _call_wrapper\n\n\n @classmethod \n def __build_output_tree(cls, symbol, category):\n current_day = datetime.today().strftime('%Y-%m-%d')\n current_full_date = datetime.now().strftime(\"%d-%m-%Y_%Hh%Mmin%Ss\")\n \n filename = '_'.join([symbol,current_full_date])\n outdir = os.path.join('C:','tmp',category,current_day)\n os.makedirs(outdir, exist_ok=True)\n\n fullname = os.path.join(outdir,filename+'.xlsx')\n\n return fullname\n\n" ]
[ [ "pandas.DataFrame" ] ]
Fariborzzz/fiftyone
[ "06975961f5ee649dd36429feb21c959dfc0744ed" ]
[ "fiftyone/utils/eval/regression.py" ]
[ "\"\"\"\nRegression evaluation.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport logging\nimport itertools\nimport numbers\n\nimport numpy as np\nimport sklearn.metrics as skm\nfrom tabulate import tabulate\n\nimport eta.core.utils as etau\n\nimport fiftyone.core.evaluation as foe\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.plots as fop\nimport fiftyone.core.validation as fov\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_regressions(\n samples,\n pred_field,\n gt_field=\"ground_truth\",\n eval_key=None,\n missing=None,\n method=\"simple\",\n **kwargs,\n):\n \"\"\"Evaluates the regression predictions in the given collection with\n respect to the specified ground truth values.\n\n You can customize the evaluation method by passing additional\n parameters for the method's config class as ``kwargs``.\n\n The supported ``method`` values and their associated configs are:\n\n - ``\"simple\"``: :class:`SimpleEvaluationConfig`\n\n If an ``eval_key`` is specified, then this method will record some\n statistics on each sample:\n\n - When evaluating sample-level fields, an ``eval_key`` field will be\n populated on each sample recording the error of that sample's\n prediction.\n\n - When evaluating frame-level fields, an ``eval_key`` field will be\n populated on each frame recording the error of that frame's\n prediction. In addition, an ``eval_key`` field will be populated on\n each sample that records the average error of the frame predictions\n of the sample.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection`\n pred_field: the name of the field containing the predicted\n :class:`fiftyone.core.labels.Regression` instances\n gt_field (\"ground_truth\"): the name of the field containing the\n ground truth :class:`fiftyone.core.labels.Regression` instances\n eval_key (None): a string key to use to refer to this evaluation\n missing (None): a missing value. 
Any None-valued regressions are\n given this value for results purposes\n method (\"simple\"): a string specifying the evaluation method to use.\n Supported values are ``(\"simple\")``\n **kwargs: optional keyword arguments for the constructor of the\n :class:`RegressionEvaluationConfig` being used\n\n Returns:\n a :class:`RegressionResults`\n \"\"\"\n fov.validate_collection_label_fields(\n samples, (pred_field, gt_field), fol.Regression, same_type=True\n )\n\n config = _parse_config(pred_field, gt_field, method, **kwargs)\n eval_method = config.build()\n eval_method.ensure_requirements()\n\n eval_method.register_run(samples, eval_key)\n\n results = eval_method.evaluate_samples(\n samples, eval_key=eval_key, missing=missing\n )\n eval_method.save_run_results(samples, eval_key, results)\n\n return results\n\n\nclass RegressionEvaluationConfig(foe.EvaluationMethodConfig):\n \"\"\"Base class for configuring :class:`RegressionEvaluation` instances.\n\n Args:\n pred_field: the name of the field containing the predicted\n :class:`fiftyone.core.labels.Regression` instances\n gt_field (\"ground_truth\"): the name of the field containing the ground\n truth :class:`fiftyone.core.labels.Regression` instances\n \"\"\"\n\n def __init__(self, pred_field, gt_field, **kwargs):\n super().__init__(**kwargs)\n self.pred_field = pred_field\n self.gt_field = gt_field\n\n\nclass RegressionEvaluation(foe.EvaluationMethod):\n \"\"\"Base class for regression evaluation methods.\n\n Args:\n config: a :class:`RegressionEvaluationConfig`\n \"\"\"\n\n def evaluate_samples(self, samples, eval_key=None, missing=None):\n \"\"\"Evaluates the regression predictions in the given samples with\n respect to the specified ground truth values.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection`\n eval_key (None): an evaluation key for this evaluation\n missing (None): a missing value. Any None-valued regressions are\n given this value for results purposes\n\n Returns:\n a :class:`RegressionResults` instance\n \"\"\"\n pass\n\n def get_fields(self, samples, eval_key):\n fields = [eval_key]\n\n if samples._is_frame_field(self.config.gt_field):\n prefix = samples._FRAMES_PREFIX + eval_key\n fields.append(prefix)\n\n return fields\n\n def cleanup(self, samples, eval_key):\n fields = [eval_key]\n\n samples._dataset.delete_sample_fields(fields, error_level=1)\n if samples._is_frame_field(self.config.gt_field):\n samples._dataset.delete_frame_fields(fields, error_level=1)\n\n def _validate_run(self, samples, eval_key, existing_info):\n self._validate_fields_match(eval_key, \"pred_field\", existing_info)\n self._validate_fields_match(eval_key, \"gt_field\", existing_info)\n\n\nclass SimpleEvaluationConfig(RegressionEvaluationConfig):\n \"\"\"Base class for configuring :class:`SimpleEvaluation` instances.\n\n Args:\n pred_field: the name of the field containing the predicted\n :class:`fiftyone.core.labels.Regression` instances\n gt_field: the name of the field containing the ground truth\n :class:`fiftyone.core.labels.Regression` instances\n metric (\"squared_error\"): the error metric to use to populate\n sample/frame-level error data. 
Supported values are\n ``(\"squared_error\", \"absolute_error\")`` or any function that\n accepts two scalar arguments ``(ypred, ytrue)``\n \"\"\"\n\n def __init__(self, pred_field, gt_field, metric=\"squared_error\", **kwargs):\n super().__init__(pred_field, gt_field, **kwargs)\n self._metric = metric\n\n @property\n def method(self):\n return \"simple\"\n\n @property\n def metric(self):\n return self._metric if etau.is_str(self._metric) else \"custom\"\n\n def attributes(self):\n return super().attributes() + [\"metric\"]\n\n\nclass SimpleEvaluation(RegressionEvaluation):\n \"\"\"Simple regression evaluation.\n\n Args:\n config: a :class:`SimpleEvaluationConfig`\n \"\"\"\n\n def evaluate_samples(self, samples, eval_key=None, missing=None):\n metric = self.config._metric\n\n if metric == \"squared_error\":\n error_fcn = lambda yp, yt: (yp - yt) ** 2\n elif metric == \"absolute_error\":\n error_fcn = lambda yp, yt: abs(yp - yt)\n elif callable(metric):\n error_fcn = metric\n else:\n raise ValueError(\n \"Unsupported metric '%s'. The supported values are %s or a \"\n \"function that accepts two scalar arguments `(ypred, ytrue)`\"\n % (metric, (\"squared_error\", \"absolute_error\"))\n )\n\n pred_field = self.config.pred_field\n gt_field = self.config.gt_field\n is_frame_field = samples._is_frame_field(gt_field)\n\n gt = gt_field + \".value\"\n pred = pred_field + \".value\"\n pred_conf = pred_field + \".confidence\"\n _id = \"id\" if not is_frame_field else \"frames.id\"\n\n ytrue, ypred, confs, ids = samples.values([gt, pred, pred_conf, _id])\n\n if is_frame_field:\n _ytrue = list(itertools.chain.from_iterable(ytrue))\n _ypred = list(itertools.chain.from_iterable(ypred))\n _confs = list(itertools.chain.from_iterable(confs))\n _ids = list(itertools.chain.from_iterable(ids))\n else:\n _ytrue = ytrue\n _ypred = ypred\n _confs = confs\n _ids = ids\n\n results = RegressionResults(\n _ytrue,\n _ypred,\n confs=_confs,\n eval_key=eval_key,\n gt_field=gt_field,\n pred_field=pred_field,\n ids=_ids,\n missing=missing,\n samples=samples,\n )\n\n if eval_key is None:\n return results\n\n def compute_error(yp, yt):\n if missing is not None:\n if yp is None:\n yp = missing\n\n if yt is None:\n yt = missing\n\n try:\n return error_fcn(yp, yt)\n except:\n return None\n\n # note: fields are manually declared so they'll exist even when\n # `samples` is empty\n dataset = samples._dataset\n if is_frame_field:\n frame_errors = [\n list(map(compute_error, yp, yt))\n for yp, yt in zip(ypred, ytrue)\n ]\n sample_errors = [_safe_mean(e) for e in frame_errors]\n\n eval_frame = samples._FRAMES_PREFIX + eval_key\n\n # Sample-level errors\n dataset._add_sample_field_if_necessary(eval_key, fof.FloatField)\n samples.set_values(eval_key, sample_errors)\n\n # Per-frame errors\n dataset._add_frame_field_if_necessary(eval_key, fof.FloatField)\n samples.set_values(eval_frame, frame_errors)\n else:\n errors = list(map(compute_error, ypred, ytrue))\n\n # Per-sample errors\n dataset._add_sample_field_if_necessary(eval_key, fof.FloatField)\n samples.set_values(eval_key, errors)\n\n return results\n\n\nclass RegressionResults(foe.EvaluationResults):\n \"\"\"Class that stores the results of a regression evaluation.\n\n Args:\n ytrue: a list of ground truth values\n ypred: a list of predicted values\n confs (None): an optional list of confidences for the predictions\n eval_key (None): the evaluation key of the evaluation\n gt_field (None): the name of the ground truth field\n pred_field (None): the name of the predictions field\n 
ids (None): a list of sample or frame IDs corresponding to the\n regressions\n missing (None): a missing value. Any None-valued regressions are\n given this value for results purposes\n samples (None): the :class:`fiftyone.core.collections.SampleCollection`\n for which the results were computed\n \"\"\"\n\n def __init__(\n self,\n ytrue,\n ypred,\n confs=None,\n eval_key=None,\n gt_field=None,\n pred_field=None,\n ids=None,\n missing=None,\n samples=None,\n ):\n ytrue, ypred, confs, ids = _parse_values(\n ytrue, ypred, confs, ids, missing=missing\n )\n\n self.ytrue = ytrue\n self.ypred = ypred\n self.confs = confs\n self.eval_key = eval_key\n self.gt_field = gt_field\n self.pred_field = pred_field\n self.ids = ids\n self.missing = missing\n\n self._samples = samples\n\n def metrics(self, weights=None):\n \"\"\"Computes various popular regression metrics for the results.\n\n The computed metrics are:\n\n - Mean squared error: :func:`sklearn:sklearn.metrics.mean_squared_error`\n - Root mean squared error: :func:`sklearn:sklearn.metrics.mean_squared_error`\n - Mean absolute error: :func:`sklearn:sklearn.metrics.mean_absolute_error`\n - Median absolute error: :func:`sklearn:sklearn.metrics.median_absolute_error`\n - R^2 score: :func:`sklearn:sklearn.metrics.r2_score`\n - Explained variance score: :func:`sklearn:sklearn.metrics.explained_variance_score`\n - Max error: :func:`sklearn:sklearn.metrics.max_error`\n - Support: the number of examples\n\n Args:\n weights (None): an optional list of weights for each example\n\n Returns:\n a dict\n \"\"\"\n yt = self.ytrue\n yp = self.ypred\n w = weights\n\n if yt.size > 0:\n mse = skm.mean_squared_error(yt, yp, sample_weight=w)\n rmse = np.sqrt(mse)\n mae = skm.mean_absolute_error(yt, yp, sample_weight=w)\n median_absolute_error = skm.median_absolute_error(yt, yp)\n r2_score = skm.r2_score(yt, yp, sample_weight=w)\n ev_score = skm.explained_variance_score(yt, yp, sample_weight=w)\n max_error = skm.max_error(yt, yp)\n support = len(yt)\n else:\n mse = 0.0\n rmse = 0.0\n mae = 0.0\n median_absolute_error = 0.0\n r2_score = 0.0\n ev_score = 0.0\n max_error = 0.0\n support = 0\n\n return {\n \"mean_squared_error\": mse,\n \"root_mean_squared_error\": rmse,\n \"mean_absolute_error\": mae,\n \"median_absolute_error\": median_absolute_error,\n \"r2_score\": r2_score,\n \"explained_variance_score\": ev_score,\n \"max_error\": max_error,\n \"support\": support,\n }\n\n def print_metrics(self, weights=None, digits=2):\n \"\"\"Prints the regression metrics computed via :meth:`metrics`.\n\n Args:\n weights (None): an optional list of weights for each example\n digits (2): the number of digits of precision to print\n \"\"\"\n metrics = self.metrics(weights=weights)\n _print_dict_as_table(metrics, digits)\n\n def plot_results(\n self, labels=None, sizes=None, backend=\"plotly\", **kwargs\n ):\n \"\"\"Plots the regression results.\n\n You can use the ``labels`` parameters to define a coloring for the\n points, and you can use the ``sizes`` parameter to scale the sizes of\n the points.\n\n You can attach plots generated by this method to an App session via its\n :attr:`fiftyone.core.session.Session.plots` attribute, which will\n automatically sync the session's view with the currently selected\n points in the plot.\n\n Args:\n labels (None): data to use to color the points. 
Can be any of the\n following:\n\n - the name of a sample field or ``embedded.field.name``\n from which to extract numeric or string values\n - a :class:`fiftyone.core.expressions.ViewExpression`\n defining numeric or string values to extract via\n :meth:`fiftyone.core.collections.SampleCollection.values`\n - a list or array-like of numeric or string values (or lists\n of lists for frame-level regressions)\n sizes (None): data to use to scale the sizes of the points. Can be\n any of the following:\n\n - the name of a sample field or ``embedded.field.name`` from\n which to extract numeric values\n - a :class:`fiftyone.core.expressions.ViewExpression`\n defining numeric values to extract via\n :meth:`fiftyone.core.collections.SampleCollection.values`\n - a list or array-like of numeric values (or lists of lists\n for frame-level regressions)\n backend (\"plotly\"): the plotting backend to use. Supported values\n are ``(\"plotly\", \"matplotlib\")``\n **kwargs: keyword arguments for the backend plotting method:\n\n - \"plotly\" backend: :meth:`fiftyone.core.plots.plotly.plot_regressions`\n - \"matplotlib\" backend: :meth:`fiftyone.core.plots.matplotlib.plot_regressions`\n\n Returns:\n an :class:`fiftyone.core.plots.base.InteractivePlot`\n \"\"\"\n return fop.plot_regressions(\n self.ytrue,\n self.ypred,\n samples=self._samples,\n ids=self.ids,\n labels=labels,\n sizes=sizes,\n gt_field=self.gt_field,\n pred_field=self.pred_field,\n backend=backend,\n **kwargs,\n )\n\n @classmethod\n def _from_dict(cls, d, samples, config, **kwargs):\n ytrue = d[\"ytrue\"]\n ypred = d[\"ypred\"]\n confs = d.get(\"confs\", None)\n eval_key = d.get(\"eval_key\", None)\n gt_field = d.get(\"gt_field\", None)\n pred_field = d.get(\"pred_field\", None)\n ids = d.get(\"ids\", None)\n missing = d.get(\"missing\", None)\n return cls(\n ytrue,\n ypred,\n confs=confs,\n eval_key=eval_key,\n gt_field=gt_field,\n pred_field=pred_field,\n ids=ids,\n missing=missing,\n samples=samples,\n **kwargs,\n )\n\n\ndef _parse_config(pred_field, gt_field, method, **kwargs):\n if method is None:\n method = \"simple\"\n\n if method == \"simple\":\n return SimpleEvaluationConfig(pred_field, gt_field, **kwargs)\n\n raise ValueError(\"Unsupported evaluation method '%s'\" % method)\n\n\ndef _safe_mean(values):\n values = [v for v in values if v is not None]\n return np.mean(values) if values else None\n\n\ndef _parse_values(ytrue, ypred, *args, missing=None):\n _ytrue = []\n _ypred = []\n _valid = []\n missing_count = 0\n\n for yt, yp in zip(ytrue, ypred):\n v = yt is not None and yp is not None\n\n if missing is None:\n _valid.append(v)\n\n if v:\n _ytrue.append(yt)\n _ypred.append(yp)\n else:\n missing_count += 1\n if missing is not None:\n if yt is None:\n yt = missing\n\n if yp is None:\n yp = missing\n\n _ytrue.append(yt)\n _ypred.append(yp)\n\n found_missing = missing_count > 0\n\n _ytrue = np.array(_ytrue)\n _ypred = np.array(_ypred)\n\n if found_missing and missing is None:\n logger.warning(\n \"Ignoring %d examples with either missing ground truth or \"\n \"predictions\",\n missing_count,\n )\n\n valid = np.array(_valid)\n args = [np.asarray(a)[valid] if a is not None else a for a in args]\n else:\n args = [np.asarray(a) if a is not None else a for a in args]\n\n return (_ytrue, _ypred, *args)\n\n\ndef _print_dict_as_table(d, digits):\n fmt = \"%%.%df\" % digits\n records = []\n for k, v in d.items():\n k = k.replace(\"_\", \" \")\n if isinstance(v, numbers.Integral):\n v = str(v)\n else:\n v = fmt % v\n\n records.append((k, 
v))\n\n print(tabulate(records, tablefmt=\"plain\", numalign=\"left\"))\n" ]
[ [ "sklearn.metrics.explained_variance_score", "sklearn.metrics.r2_score", "numpy.sqrt", "sklearn.metrics.median_absolute_error", "numpy.asarray", "sklearn.metrics.mean_absolute_error", "sklearn.metrics.mean_squared_error", "sklearn.metrics.max_error", "numpy.mean", "numpy.array" ] ]
Laubeee/caffe-tensorflow
[ "1a5c027b19c6e9d4bc5f2cb4d5906efe46c60466" ]
[ "convert.py" ]
[ "#!/usr/bin/env python\n\nimport os\nimport sys\nimport numpy as np\nimport argparse\nfrom kaffe import KaffeError, print_stderr\nfrom kaffe.tensorflow import TensorFlowTransformer\n\nimport shutil\nimport tensorflow as tf\nfrom tensorflow.python.tools.freeze_graph import freeze_graph\n\n\ndef fatal_error(msg):\n print_stderr(msg)\n exit(-1)\n\n\ndef validate_arguments(args):\n if (args.data_output_path is not None) and (args.caffemodel is None):\n fatal_error('No input data path provided.')\n if (args.caffemodel is not None) and (args.data_output_path is None) and (args.standalone_output_path is None):\n fatal_error('No output data path provided.')\n if (args.code_output_path is None) and (args.data_output_path is None) and (args.standalone_output_path is None):\n fatal_error('No output path specified.')\n\n\ndef convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path, phase):\n try:\n sess = tf.InteractiveSession()\n transformer = TensorFlowTransformer(def_path, caffemodel_path, phase=phase)\n print_stderr('Converting data...')\n if data_output_path is not None:\n data = transformer.transform_data()\n print_stderr('Saving data...')\n with open(data_output_path, 'wb') as data_out:\n np.save(data_out, data)\n if code_output_path is not None:\n print_stderr('Saving source...')\n with open(code_output_path, 'wb') as src_out:\n src_out.write(transformer.transform_source())\n\n if standalone_output_path:\n filename, _ = os.path.splitext(os.path.basename(standalone_output_path))\n temp_folder = os.path.join(os.path.dirname(standalone_output_path), '.tmp')\n os.makedirs(temp_folder)\n\n if data_output_path is None:\n data = transformer.transform_data()\n print_stderr('Saving data...')\n data_output_path = os.path.join(temp_folder, filename) + '.npy'\n with open(data_output_path, 'wb') as data_out:\n np.save(data_out, data)\n\n if code_output_path is None:\n print_stderr('Saving source...')\n code_output_path = os.path.join(temp_folder, filename) + '.py'\n with open(code_output_path, 'wb') as src_out:\n src_out.write(transformer.transform_source())\n\n checkpoint_path = os.path.join(temp_folder, filename + '.ckpt')\n graph_name = os.path.basename(standalone_output_path)\n graph_folder = os.path.dirname(standalone_output_path)\n input_node = transformer.graph.nodes[0].name\n output_node = transformer.graph.nodes[-1].name\n tensor_shape = transformer.graph.get_node(input_node).output_shape\n tensor_shape_list = [tensor_shape.batch_size, tensor_shape.height, tensor_shape.width, tensor_shape.channels]\n\n sys.path.append(os.path.dirname(code_output_path))\n module = os.path.splitext(os.path.basename(code_output_path))[0]\n class_name = transformer.graph.name\n KaffeNet = getattr(__import__(module), class_name)\n\n data_placeholder = tf.placeholder(tf.float32, tensor_shape_list, name=input_node)\n net = KaffeNet({input_node: data_placeholder})\n\n # load weights stored in numpy format\n net.load(data_output_path, sess)\n\n print_stderr('Saving checkpoint...')\n saver = tf.train.Saver()\n saver.save(sess, checkpoint_path)\n\n print_stderr('Saving graph definition as protobuf...')\n tf.train.write_graph(sess.graph.as_graph_def(), graph_folder, graph_name, False)\n\n input_graph_path = standalone_output_path\n input_saver_def_path = \"\"\n input_binary = True\n input_checkpoint_path = checkpoint_path\n output_node_names = output_node\n restore_op_name = 'save/restore_all'\n filename_tensor_name = 'save/Const:0'\n output_graph_path = standalone_output_path\n 
clear_devices = True\n\n print_stderr('Saving standalone model...')\n freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, input_checkpoint_path,\n output_node_names, restore_op_name,\n filename_tensor_name, output_graph_path,\n clear_devices, '')\n\n shutil.rmtree(temp_folder)\n\n print_stderr('Done.')\n except KaffeError as err:\n fatal_error('Error encountered: {}'.format(err))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('def_path', help='Model definition (.prototxt) path')\n parser.add_argument('--caffemodel', help='Model data (.caffemodel) path')\n parser.add_argument('--data-output-path', help='Converted data output path')\n parser.add_argument('--code-output-path', help='Save generated source to this path')\n parser.add_argument('--standalone-output-path', help='Save generated standalone tensorflow model to this path')\n parser.add_argument('-p',\n '--phase',\n default='test',\n help='The phase to convert: test (default) or train')\n args = parser.parse_args()\n validate_arguments(args)\n convert(args.def_path, args.caffemodel, args.data_output_path, args.code_output_path,\n args.standalone_output_path, args.phase)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.InteractiveSession", "tensorflow.python.tools.freeze_graph.freeze_graph", "tensorflow.placeholder", "numpy.save", "tensorflow.train.Saver" ] ]
kuna-systems/detr
[ "ae18dec551b4810eb44d58d612c5181812305a1b" ]
[ "d2/detr/detr.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nimport math\nfrom typing import List\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess\nfrom detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks\nfrom detectron2.utils.logger import log_first_n\nfrom fvcore.nn import giou_loss, smooth_l1_loss\nfrom models.backbone import Joiner\nfrom models.detr import DETR, SetCriterion\nfrom models.matcher import HungarianMatcher\nfrom models.position_encoding import PositionEmbeddingSine\nfrom models.transformer import Transformer\nfrom models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm\nfrom util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh\nfrom util.misc import NestedTensor\nfrom datasets.coco import convert_coco_poly_to_mask\n\n__all__ = [\"Detr\"]\n\n\nclass MaskedBackbone(nn.Module):\n \"\"\" This is a thin wrapper around D2's backbone to provide padding masking\"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.backbone = build_backbone(cfg)\n backbone_shape = self.backbone.output_shape()\n self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()]\n self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels\n\n def forward(self, images):\n features = self.backbone(images.tensor)\n masks = self.mask_out_padding(\n [features_per_level.shape for features_per_level in features.values()],\n images.image_sizes,\n images.tensor.device,\n )\n assert len(features) == len(masks)\n for i, k in enumerate(features.keys()):\n features[k] = NestedTensor(features[k], masks[i])\n return features\n\n def mask_out_padding(self, feature_shapes, image_sizes, device):\n masks = []\n assert len(feature_shapes) == len(self.feature_strides)\n for idx, shape in enumerate(feature_shapes):\n N, _, H, W = shape\n masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device)\n for img_idx, (h, w) in enumerate(image_sizes):\n masks_per_feature_level[\n img_idx,\n : int(np.ceil(float(h) / self.feature_strides[idx])),\n : int(np.ceil(float(w) / self.feature_strides[idx])),\n ] = 0\n masks.append(masks_per_feature_level)\n return masks\n\n\n@META_ARCH_REGISTRY.register()\nclass Detr(nn.Module):\n \"\"\"\n Implement Detr\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n\n self.device = torch.device(cfg.MODEL.DEVICE)\n\n self.num_classes = cfg.MODEL.DETR.NUM_CLASSES\n self.mask_on = cfg.MODEL.MASK_ON\n hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM\n num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES\n # Transformer parameters:\n nheads = cfg.MODEL.DETR.NHEADS\n dropout = cfg.MODEL.DETR.DROPOUT\n dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD\n enc_layers = cfg.MODEL.DETR.ENC_LAYERS\n dec_layers = cfg.MODEL.DETR.DEC_LAYERS\n pre_norm = cfg.MODEL.DETR.PRE_NORM\n\n # Loss parameters:\n giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT\n l1_weight = cfg.MODEL.DETR.L1_WEIGHT\n labels_weight = cfg.MODEL.LABELS_LOSS_W\n deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION\n no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT\n\n N_steps = hidden_dim // 2\n d2_backbone = MaskedBackbone(cfg)\n backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True))\n backbone.num_channels = d2_backbone.num_channels\n\n 
transformer = Transformer(\n d_model=hidden_dim,\n dropout=dropout,\n nhead=nheads,\n dim_feedforward=dim_feedforward,\n num_encoder_layers=enc_layers,\n num_decoder_layers=dec_layers,\n normalize_before=pre_norm,\n return_intermediate_dec=deep_supervision,\n )\n\n self.detr = DETR(\n backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision\n )\n if self.mask_on:\n frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS\n if frozen_weights != '':\n print(\"LOAD pre-trained weights\")\n weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model']\n new_weight = {}\n for k, v in weight.items():\n if 'detr.' in k:\n new_weight[k.replace('detr.', '')] = v\n else:\n print(f\"Skipping loading weight {k} from frozen model\")\n del weight\n self.detr.load_state_dict(new_weight)\n del new_weight\n self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))\n self.seg_postprocess = PostProcessSegm\n\n self.detr.to(self.device)\n\n # building criterion\n matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight)\n weight_dict = {\"loss_ce\": labels_weight, \"loss_bbox\": l1_weight}\n weight_dict[\"loss_giou\"] = giou_weight\n if deep_supervision:\n aux_weight_dict = {}\n for i in range(dec_layers - 1):\n aux_weight_dict.update({k + f\"_{i}\": v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n losses = [\"labels\", \"boxes\", \"cardinality\"]\n if self.mask_on:\n losses += [\"masks\"]\n self.criterion = SetCriterion(\n self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, labels_loss=cfg.MODEL.LABELS_LOSS\n )\n self.criterion.to(self.device)\n\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances: Instances\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n Returns:\n dict[str: Tensor]:\n mapping from a named loss to a tensor storing the loss. 
Used during training only.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n output = self.detr(images)\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n mask_pred = output[\"pred_masks\"] if self.mask_on else None\n results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n if self.mask_on and hasattr(targets_per_image, 'gt_masks'):\n gt_masks = targets_per_image.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n new_targets[-1].update({'masks': gt_masks})\n return new_targets\n\n def inference(self, box_cls, box_pred, mask_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every query\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # For each box we assign the best class or the second best if the best one is `no_object`.\n scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)\n\n for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip(\n scores, labels, box_pred, image_sizes\n )):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n if self.mask_on:\n mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False)\n mask = mask[0].sigmoid() > 0.5\n B, N, H, W = mask_pred.shape\n mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32)\n result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device)\n\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images\n" ]
[ [ "torch.nn.functional.softmax", "torch.ones", "torch.Tensor", "torch.load", "torch.device", "torch.as_tensor" ] ]
AndyYuan96/MVF-End-to-End-Multi-View-Fusion-for-3D-Object-Detection-in-LiDAR-Point-Clouds-
[ "cf34897f25353a3f348d0a39c8db5ba15cadb2d7" ]
[ "pcdet/models/bbox_heads/anchor_target_assigner.py" ]
[ "# This file is modified from https://github.com/traveller59/second.pytorch\n\nimport numpy as np\nimport numpy.random as npr\nimport numba\nfrom ...utils import common_utils\n\n\ndef unmap(data, count, inds, fill=0):\n '''Unmap a subset of item (data) back to the original set of items (of\n size count)'''\n if count == len(inds):\n return data\n\n if len(data.shape) == 1:\n ret = np.empty((count, ), dtype=data.dtype)\n ret.fill(fill)\n ret[inds] = data\n else:\n ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)\n ret.fill(fill)\n ret[inds, :] = data\n return ret\n\n\ndef create_anchors_3d_range(feature_size,\n anchor_range,\n sizes=((1.6, 3.9, 1.56),),\n rotations=(0, np.pi / 2),\n dtype=np.float32):\n \"\"\"\n Args:\n feature_size: list [D, H, W](zyx)\n sizes: [N, 3] list of list or array, size of anchors, xyz\n\n Returns:\n anchors: [*feature_size, num_sizes, num_rots, 7] tensor.\n \"\"\"\n\n anchor_range = np.array(anchor_range, dtype)\n z_centers = np.linspace(\n anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)\n y_centers = np.linspace(\n anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)\n x_centers = np.linspace(\n anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)\n sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])\n rotations = np.array(rotations, dtype=dtype)\n rets = np.meshgrid(\n x_centers, y_centers, z_centers, rotations, indexing='ij')\n tile_shape = [1] * 5\n tile_shape[-2] = int(sizes.shape[0])\n for i in range(len(rets)):\n rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)\n rets[i] = rets[i][..., np.newaxis] # for concat\n sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])\n tile_size_shape = list(rets[0].shape)\n tile_size_shape[3] = 1\n sizes = np.tile(sizes, tile_size_shape)\n rets.insert(3, sizes)\n ret = np.concatenate(rets, axis=-1)\n return np.transpose(ret, [2, 1, 0, 3, 4, 5])\n\n\ndef corners_nd(dims, origin=0.5):\n \"\"\"generate relative box corners based on length per dim and\n origin point.\n\n Args:\n dims (float array, shape=[N, ndim]): array of length per dim\n origin (list or array or float): origin point relate to smallest point.\n\n Returns:\n float array, shape=[N, 2 ** ndim, ndim]: returned corners.\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n \"\"\"\n ndim = int(dims.shape[1])\n corners_norm = np.stack(\n np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(\n dims.dtype)\n # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1\n # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n # so need to convert to a format which is convenient to do other computing.\n # for 2d boxes, format is clockwise start with minimum point\n # for 3d boxes, please draw lines by your hand.\n if ndim == 2:\n # generate clockwise box corners\n corners_norm = corners_norm[[0, 1, 3, 2]]\n elif ndim == 3:\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(\n [1, 2 ** ndim, ndim])\n return corners\n\n\ndef center_to_minmax_2d_0_5(centers, dims):\n return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)\n\n\ndef rotation_2d(points, angles):\n \"\"\"rotation 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float 
array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n \"\"\"\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\n return np.einsum('aij,jka->aik', points, rot_mat_T)\n\n\ndef center_to_corner_box2d(centers, dims, angles=None, origin=0.5):\n \"\"\"convert kitti locations, dimensions and angles to corners.\n format: center(xy), dims(xy), angles(clockwise when positive)\n\n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n\n Returns:\n [type]: [description]\n \"\"\"\n # 'length' in kitti format is in x axis.\n # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\n corners = corners_nd(dims, origin=origin)\n # corners: [N, 4, 2]\n if angles is not None:\n corners = rotation_2d(corners, angles)\n corners += centers.reshape([-1, 1, 2])\n return corners\n\n\ndef center_to_minmax_2d(centers, dims, origin=0.5):\n if origin == 0.5:\n return center_to_minmax_2d_0_5(centers, dims)\n corners = center_to_corner_box2d(centers, dims, origin=origin)\n return corners[:, [0, 2]].reshape([-1, 4])\n\n\ndef rbbox2d_to_near_bbox(rbboxes):\n \"\"\"convert rotated bbox to nearest 'standing' or 'lying' bbox.\n Args:\n rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes\n Returns:\n bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes\n \"\"\"\n rots = rbboxes[..., -1]\n rots_0_pi_div_2 = np.abs(common_utils.limit_period(rots, 0.5, np.pi))\n cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]\n bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])\n bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])\n return bboxes\n\n\[email protected](nopython=True)\ndef iou_jit(boxes, query_boxes, eps=0.0):\n \"\"\"calculate box iou. 
note that jit version runs 2x faster than cython on\n my machine!\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\n for k in range(K):\n box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) *\n (query_boxes[k, 3] - query_boxes[k, 1] + eps))\n for n in range(N):\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]) + eps)\n if iw > 0:\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]) + eps)\n if ih > 0:\n ua = (\n (boxes[n, 2] - boxes[n, 0] + eps) *\n (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih)\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\nclass AnchorGeneratorRange(object):\n def __init__(self, anchor_ranges, sizes=((1.6, 3.9, 1.56),), rotations=(0, np.pi / 2), class_name=None,\n match_threshold=-1, unmatch_threshold=-1, custom_values=None, dtype=np.float32, feature_map_size=None):\n self._sizes = sizes\n self._anchor_ranges = anchor_ranges\n self._rotations = rotations\n self._dtype = dtype\n self._class_name = class_name\n self._match_threshold = match_threshold\n self._unmatch_threshold = unmatch_threshold\n self._custom_values = custom_values\n self._feature_map_size = feature_map_size\n\n @property\n def class_name(self):\n return self._class_name\n\n @property\n def match_threshold(self):\n return self._match_threshold\n\n @property\n def unmatch_threshold(self):\n return self._unmatch_threshold\n\n @property\n def custom_values(self):\n return self._custom_values\n\n @property\n def feature_map_size(self):\n return self._feature_map_size\n\n @property\n def num_anchors_per_localization(self):\n num_rot = len(self._rotations)\n num_size = np.array(self._sizes).reshape([-1, 3]).shape[0]\n return num_rot * num_size\n\n def generate(self, feature_map_size):\n anchors = create_anchors_3d_range(feature_map_size, self._anchor_ranges, self._sizes,\n self._rotations, self._dtype)\n if self._custom_values is not None:\n custom_values = np.zeros((*anchors.shape[:-1], len(self._custom_values)), dtype=self._dtype)\n for k in range(len(self._custom_values)):\n custom_values[..., k] = self._custom_values[k]\n anchors = np.concatenate((anchors, custom_values), axis=-1)\n return anchors\n\n\nclass TargetAssigner(object):\n def __init__(self, anchor_generators, pos_fraction, sample_size, region_similarity_fn_name, box_coder, logger=None):\n super().__init__()\n self.anchor_generators = anchor_generators\n self.pos_fraction = pos_fraction if pos_fraction >= 0 else None\n self.sample_size = sample_size\n self.region_similarity_calculator = getattr(self, region_similarity_fn_name)\n self.box_coder = box_coder\n self.logger = logger\n\n def generate_anchors(self, feature_map_size=None, use_multi_head=False):\n anchors_list = []\n matched_thresholds = [a.match_threshold for a in self.anchor_generators]\n unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]\n match_list, unmatch_list = [], []\n for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,\n matched_thresholds, unmatched_thresholds):\n if use_multi_head:\n anchors = anchor_generator.generate(anchor_generator.feature_map_size) # (1, H, W, 2#, code_size)\n anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])\n ndim = 
len(anchor_generator.feature_map_size)\n anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1) # (2#, 1, H, W, code_size)\n anchors = anchors.reshape(-1, anchors.shape[-1])\n else:\n anchors = anchor_generator.generate(feature_map_size)\n anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])\n\n anchors_list.append(anchors)\n num_anchors = np.prod(anchors.shape[:-1])\n match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))\n unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))\n anchors = np.concatenate(anchors_list, axis=-2)\n matched_thresholds = np.concatenate(match_list, axis=0)\n unmatched_thresholds = np.concatenate(unmatch_list, axis=0)\n return {\n 'anchors': anchors,\n 'matched_thresholds': matched_thresholds,\n 'unmatched_thresholds': unmatched_thresholds\n }\n\n def generate_anchors_dict(self, feature_map_size, use_multi_head=False):\n anchors_list = []\n matched_thresholds = [a.match_threshold for a in self.anchor_generators]\n unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]\n match_list, unmatch_list = [], []\n anchors_dict = {a.class_name: {} for a in self.anchor_generators}\n for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,\n matched_thresholds, unmatched_thresholds):\n if use_multi_head:\n anchors = anchor_generator.generate(anchor_generator.feature_map_size)\n anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])\n ndim = len(feature_map_size)\n anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1)\n else:\n anchors = anchor_generator.generate(feature_map_size)\n anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])\n\n anchors_list.append(anchors)\n num_anchors = np.prod(anchors.shape[:-1])\n match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))\n unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))\n class_name = anchor_generator.class_name\n anchors_dict[class_name]['anchors'] = anchors\n anchors_dict[class_name]['matched_thresholds'] = match_list[-1]\n anchors_dict[class_name]['unmatched_thresholds'] = unmatch_list[-1]\n return anchors_dict\n\n @staticmethod\n def nearest_iou_similarity(boxes1, boxes2):\n boxes1_bv = rbbox2d_to_near_bbox(boxes1)\n boxes2_bv = rbbox2d_to_near_bbox(boxes2)\n ret = iou_jit(boxes1_bv, boxes2_bv, eps=0.0)\n return ret\n\n def assign_v2(self, anchors_dict, gt_boxes, anchors_mask=None, gt_classes=None, gt_names=None):\n prune_anchor_fn = None if anchors_mask is None else lambda _: np.where(anchors_mask)[0]\n\n def similarity_fn(anchors, gt_boxes):\n anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]\n gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]\n return self.region_similarity_calculator(anchors_rbv, gt_boxes_rbv)\n\n def box_encoding_fn(boxes, anchors):\n return self.box_coder.encode_np(boxes, anchors)\n\n targets_list = []\n for class_name, anchor_dict in anchors_dict.items():\n mask = np.array([c == class_name for c in gt_names], dtype=np.bool_)\n targets = self.create_target_np(\n # anchor_dict['anchors'].reshape(-1, self.box_coder.code_size),\n anchor_dict['anchors'].reshape(-1, anchor_dict['anchors'].shape[-1]),\n gt_boxes[mask],\n similarity_fn,\n box_encoding_fn,\n prune_anchor_fn=prune_anchor_fn,\n gt_classes=gt_classes[mask],\n matched_threshold=anchor_dict['matched_thresholds'],\n unmatched_threshold=anchor_dict['unmatched_thresholds'],\n positive_fraction=self.pos_fraction,\n rpn_batch_size=self.sample_size,\n norm_by_num_examples=False,\n 
box_code_size=self.box_coder.code_size\n )\n targets_list.append(targets)\n feature_map_size = anchor_dict['anchors'].shape[:3]\n targets_dict = {\n 'labels': [t['labels'] for t in targets_list],\n 'bbox_targets': [t['bbox_targets'] for t in targets_list],\n 'bbox_src_targets': [t['bbox_src_targets'] for t in targets_list],\n 'bbox_outside_weights': [t['bbox_outside_weights'] for t in targets_list],\n }\n # bbox_targets: (H, W, num_anchors_per_loc, code_size)\n targets_dict['bbox_targets'] = np.concatenate([v.reshape(*feature_map_size, -1, self.box_coder.code_size)\n for v in targets_dict['bbox_targets']], axis=-2)\n targets_dict['bbox_src_targets'] = np.concatenate([v.reshape(*feature_map_size, -1, 7)#self.box_coder.code_size)\n for v in targets_dict['bbox_src_targets']], axis=-2)\n targets_dict['labels'] = np.concatenate([v.reshape(*feature_map_size, -1)\n for v in targets_dict['labels']], axis=-1)\n targets_dict['bbox_outside_weights'] = np.concatenate([v.reshape(*feature_map_size, -1)\n for v in targets_dict['bbox_outside_weights']], axis=-1)\n\n targets_dict['bbox_targets'] = targets_dict['bbox_targets'].reshape(-1, self.box_coder.code_size)\n targets_dict['bbox_src_targets'] = targets_dict['bbox_src_targets'].reshape(-1, self.box_coder.code_size)\n targets_dict['labels'] = targets_dict['labels'].reshape(-1)\n targets_dict['bbox_outside_weights'] = targets_dict['bbox_outside_weights'].reshape(-1)\n\n return targets_dict\n\n def assign_multihead(self, anchors_dict, gt_boxes, anchors_mask=None, gt_classes=None, gt_names=None):\n prune_anchor_fn = None if anchors_mask is None else lambda _: np.where(anchors_mask)[0]\n\n def similarity_fn(anchors, gt_boxes):\n anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]\n gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]\n return self.region_similarity_calculator(anchors_rbv, gt_boxes_rbv)\n\n def box_encoding_fn(boxes, anchors):\n return self.box_coder.encode_np(boxes, anchors)\n\n targets_list = []\n for class_name, anchor_dict in anchors_dict.items():\n mask = np.array([c == class_name for c in gt_names], dtype=np.bool_)\n targets = self.create_target_np(\n # anchor_dict['anchors'].reshape(-1, self.box_coder.code_size),\n anchor_dict['anchors'].reshape(-1, anchor_dict['anchors'].shape[-1]),\n gt_boxes[mask],\n similarity_fn,\n box_encoding_fn,\n prune_anchor_fn=prune_anchor_fn,\n gt_classes=gt_classes[mask],\n matched_threshold=anchor_dict['matched_thresholds'],\n unmatched_threshold=anchor_dict['unmatched_thresholds'],\n positive_fraction=self.pos_fraction,\n rpn_batch_size=self.sample_size,\n norm_by_num_examples=False,\n box_code_size=self.box_coder.code_size\n )\n targets_list.append(targets)\n targets_dict = {\n 'labels': [t['labels'] for t in targets_list],\n 'bbox_targets': [t['bbox_targets'] for t in targets_list],\n 'bbox_outside_weights': [t['bbox_outside_weights'] for t in targets_list],\n }\n # # bbox_targets: (H, W, num_anchors_per_loc, code_size)\n targets_dict['bbox_targets'] = np.concatenate([v.reshape(-1, self.box_coder.code_size)\n for v in targets_dict['bbox_targets']], axis=0)\n targets_dict['labels'] = np.concatenate([v.reshape(-1) for v in targets_dict['labels']], axis=0)\n targets_dict['bbox_outside_weights'] = np.concatenate([v.reshape(-1)\n for v in targets_dict['bbox_outside_weights']], axis=0)\n return targets_dict\n\n def create_target_np(self, all_anchors,\n gt_boxes,\n similarity_fn,\n box_encoding_fn,\n prune_anchor_fn=None,\n gt_classes=None,\n matched_threshold=0.6,\n unmatched_threshold=0.45,\n bbox_inside_weight=None,\n 
positive_fraction=None,\n rpn_batch_size=300,\n norm_by_num_examples=False,\n box_code_size=7):\n '''Modified from FAIR detectron.\n Args:\n all_anchors: [num_of_anchors, box_ndim] float tensor.\n gt_boxes: [num_gt_boxes, box_ndim] float tensor.\n similarity_fn: a function, accept anchors and gt_boxes, return\n similarity matrix(such as IoU).\n box_encoding_fn: a function, accept gt_boxes and anchors, return\n box encodings(offsets).\n prune_anchor_fn: a function, accept anchors, return indices that\n indicate valid anchors.\n gt_classes: [num_gt_boxes] int tensor. indicate gt classes, must\n start with 1.\n matched_threshold: float, iou greater than matched_threshold will\n be treated as positives.\n unmatched_threshold: float, iou smaller than unmatched_threshold will\n be treated as negatives.\n bbox_inside_weight: unused\n positive_fraction: [0-1] float or None. if not None, we will try to\n keep ratio of pos/neg equal to positive_fraction when sample.\n if there is not enough positives, it fills the rest with negatives\n rpn_batch_size: int. sample size\n norm_by_num_examples: bool. norm box_weight by number of examples, but\n I recommend to do this outside.\n Returns:\n labels, bbox_targets, bbox_outside_weights\n '''\n total_anchors = all_anchors.shape[0]\n if prune_anchor_fn is not None:\n inds_inside = prune_anchor_fn(all_anchors)\n anchors = all_anchors[inds_inside, :]\n if not isinstance(matched_threshold, float):\n matched_threshold = matched_threshold[inds_inside]\n if not isinstance(unmatched_threshold, float):\n unmatched_threshold = unmatched_threshold[inds_inside]\n else:\n anchors = all_anchors\n inds_inside = None\n num_inside = len(inds_inside) if inds_inside is not None else total_anchors\n box_ndim = all_anchors.shape[1]\n if self.logger is not None:\n self.logger.info('total_anchors: {}'.format(total_anchors))\n self.logger.info('inds_inside: {}'.format(num_inside))\n self.logger.info('anchors.shape: {}'.format(anchors.shape))\n if gt_classes is None:\n gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32)\n # Compute anchor labels:\n # label=1 is positive, 0 is negative, -1 is don't care (ignore)\n labels = np.empty((num_inside,), dtype=np.int32)\n gt_ids = np.empty((num_inside,), dtype=np.int32)\n labels.fill(-1)\n gt_ids.fill(-1)\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n # Compute overlaps between the anchors and the gt boxes overlaps\n anchor_by_gt_overlap = similarity_fn(anchors, gt_boxes)\n # Map from anchor to gt box that has highest overlap\n anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)\n # For each anchor, amount of overlap with most overlapping gt box\n anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),\n anchor_to_gt_argmax] #\n # Map from gt box to an anchor that has highest overlap\n gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)\n # For each gt box, amount of overlap with most overlapping anchor\n gt_to_anchor_max = anchor_by_gt_overlap[\n gt_to_anchor_argmax,\n np.arange(anchor_by_gt_overlap.shape[1])]\n # must remove gt which doesn't match any anchor.\n empty_gt_mask = gt_to_anchor_max == 0\n gt_to_anchor_max[empty_gt_mask] = -1\n # Find all anchors that share the max overlap amount\n # (this includes many ties)\n anchors_with_max_overlap = np.where(\n anchor_by_gt_overlap == gt_to_anchor_max)[0]\n # Fg label: for each gt use anchors with highest overlap\n # (including ties)\n gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]\n 
gt_ids[anchors_with_max_overlap] = gt_inds_force\n # Fg label: above threshold IOU\n pos_inds = anchor_to_gt_max >= matched_threshold\n gt_inds = anchor_to_gt_argmax[pos_inds]\n labels[pos_inds] = gt_classes[gt_inds]\n gt_ids[pos_inds] = gt_inds\n bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]\n else:\n # labels[:] = 0\n bg_inds = np.arange(num_inside)\n fg_inds = np.where(labels > 0)[0]\n fg_max_overlap = None\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n fg_max_overlap = anchor_to_gt_max[fg_inds]\n gt_pos_ids = gt_ids[fg_inds]\n # bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]\n # bg_inds = np.where(labels == 0)[0]\n # subsample positive labels if we have too many\n if positive_fraction is not None:\n num_fg = int(positive_fraction * rpn_batch_size)\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n fg_inds = np.where(labels > 0)[0]\n\n # subsample negative labels if we have too many\n # (samples with replacement, but since the set of bg inds is large most\n # samples will not have repeats)\n num_bg = rpn_batch_size - np.sum(labels > 0)\n # print(num_fg, num_bg, len(bg_inds) )\n if len(bg_inds) > num_bg:\n enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]\n labels[enable_inds] = 0\n bg_inds = np.where(labels == 0)[0]\n else:\n if len(gt_boxes) == 0 or anchors.shape[0] == 0:\n labels[:] = 0\n else:\n labels[bg_inds] = 0\n # re-enable anchors_with_max_overlap\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]\n bbox_targets = np.zeros(\n (num_inside, box_code_size), dtype=all_anchors.dtype)\n bbox_src_targets = np.zeros(\n (num_inside, 7), dtype=all_anchors.dtype)\n\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n # print(anchors[fg_inds, :].shape, gt_boxes[anchor_to_gt_argmax[fg_inds], :].shape)\n # bbox_targets[fg_inds, :] = box_encoding_fn(\n # anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :])\n fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]\n fg_anchors = anchors[fg_inds, :]\n bbox_targets[fg_inds, :] = box_encoding_fn(fg_gt_boxes, fg_anchors)\n temp_src_gt_boxes = fg_gt_boxes.copy()\n temp_src_gt_boxes[:, 0:3] = fg_gt_boxes[:, 0:3] - fg_anchors[:, 0:3]\n bbox_src_targets[fg_inds, :] = temp_src_gt_boxes\n\n # Bbox regression loss has the form:\n # loss(x) = weight_outside * L(weight_inside * x)\n # Inside weights allow us to set zero loss on an element-wise basis\n # Bbox regression is only trained on positive examples so we set their\n # weights to 1.0 (or otherwise if config is different) and 0 otherwise\n # NOTE: we don't need bbox_inside_weights, remove it.\n # bbox_inside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)\n # bbox_inside_weights[labels == 1, :] = [1.0] * box_ndim\n\n # The bbox regression loss only averages by the number of images in the\n # mini-batch, whereas we need to average by the total number of example\n # anchors selected\n # Outside weights are used to scale each element-wise loss so the final\n # average over the mini-batch is correct\n # bbox_outside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)\n bbox_outside_weights = np.zeros((num_inside,), dtype=all_anchors.dtype)\n # uniform weighting of examples (given non-uniform sampling)\n if norm_by_num_examples:\n num_examples = np.sum(labels >= 0) # neg + pos\n num_examples = np.maximum(1.0, num_examples)\n bbox_outside_weights[labels > 0] = 1.0 / num_examples\n else:\n bbox_outside_weights[labels > 0] = 1.0\n 
# bbox_outside_weights[labels == 0, :] = 1.0 / num_examples\n\n # Map up to original set of anchors\n if inds_inside is not None:\n labels = unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_src_targets = unmap(bbox_src_targets, total_anchors, inds_inside, fill=0)\n # bbox_inside_weights = unmap(\n # bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = unmap(\n bbox_outside_weights, total_anchors, inds_inside, fill=0)\n # return labels, bbox_targets, bbox_outside_weights\n ret = {\n 'labels': labels,\n 'bbox_targets': bbox_targets,\n 'bbox_outside_weights': bbox_outside_weights,\n 'assigned_anchors_overlap': fg_max_overlap,\n 'positive_gt_id': gt_pos_ids,\n 'bbox_src_targets': bbox_src_targets,\n }\n if inds_inside is not None:\n ret['assigned_anchors_inds'] = inds_inside[fg_inds]\n else:\n ret['assigned_anchors_inds'] = fg_inds\n return ret\n\n @property\n def num_anchors_per_location(self):\n num = 0\n for a_generator in self.anchor_generators:\n num += a_generator.num_anchors_per_localization\n return num\n\n def num_anchors_per_location_class(self, class_name):\n if isinstance(class_name, int):\n class_name = self.classes[class_name]\n assert class_name in self.classes\n class_idx = self.classes.index(class_name)\n return self.anchor_generators[class_idx].num_anchors_per_localization\n\n @property\n def classes(self):\n return [a.class_name for a in self.anchor_generators]\n" ]
[ [ "numpy.linspace", "numpy.einsum", "numpy.concatenate", "numpy.where", "numpy.reshape", "numpy.arange", "numpy.stack", "numpy.sin", "numpy.full", "numpy.zeros", "numpy.transpose", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.tile", "numpy.cos", "numpy.ones", "numpy.prod", "numpy.empty" ] ]
islamazhar/trees
[ "502565c5bf02503c7bece09cddd93f9368da02c3" ]
[ "trees/mcmc.py" ]
[ "import random\nimport logging\nimport numpy as np\n\nclass MetropolisHastingsSampler(object):\n\n def __init__(self, tree, X):\n self.tree = tree\n self.X = X\n self.last_move = None\n self.likelihoods = []\n\n def initialize_assignments(self):\n self.tree.initialize_from_data(self.X)\n\n def add_constraint(self, constraint):\n self.tree.add_constraint(constraint, self.X)\n\n def parent_move(self):\n logging.debug(\"Copying tree...\")\n tree = self.tree.copy()\n\n old_likelihood = self.tree.marg_log_likelihood()\n logging.debug(\"Old Marginal Likelihood: %f\" % old_likelihood)\n\n node = tree.choice()\n old_assignment = tree.get_assignment(node.parent)\n old_index, old_state = old_assignment\n subtree = node.detach()\n\n backward_likelihood = tree.log_prob_assignment(old_assignment)\n logging.debug(\"Backward Likelihood: %f\" % backward_likelihood)\n\n points = set()\n if len(tree.constraints) > 0:\n points = subtree.points()\n\n time = float('inf')\n\n try_counter = 0\n while time > subtree.get_state('time'):\n (assignment, forward_likelihood) = tree.sample_assignment(constraints=tree.constraints,\n points=points,\n state=old_state)\n logging.debug(\"Candidate assignment: %s\", str(assignment))\n (index, state) = assignment\n time = state['time']\n try_counter += 1\n if try_counter > 500:\n return\n\n tree.assign_node(subtree, assignment)\n new_likelihood = tree.marg_log_likelihood()\n\n logging.debug(\"New Marginal Likelihood: %f\" % old_likelihood)\n logging.debug(\"Forward Likelihood: %f\" % forward_likelihood)\n\n a = min(1, np.exp(new_likelihood + backward_likelihood - old_likelihood - forward_likelihood))\n if np.random.random() < a:\n logging.debug(\"Accepted new tree with probability: %f\" % a)\n self.tree = tree\n return\n logging.debug(\"Rejected new tree with probability: %f\" % a)\n\n def update_latent(self):\n self.tree.sample_latent()\n\n def sample(self):\n self.tree = self.tree.copy()\n random.choice([self.parent_move, self.update_latent])()\n self.likelihoods.append(self.tree.marg_log_likelihood())\n\nclass SPRSampler(object):\n\n def __init__(self, tree, X):\n self.tree = tree\n self.X = X\n self.last_move = None\n self.likelihoods = []\n\n def initialize_assignments(self):\n self.tree.initialize_assignments(np.arange(self.X.shape[0]))\n\n def add_constraint(self, constraint):\n self.tree.add_constraint(constraint, self.X)\n\n def parent_move(self):\n logging.debug(\"Copying tree...\")\n tree = self.tree.copy()\n\n old_likelihood = self.tree.marg_log_likelihood()\n logging.debug(\"Old Marginal Likelihood: %f\" % old_likelihood)\n logging.debug(\"Old Cost: %f\" % self.tree.cost())\n\n node = tree.choice()\n old_assignment = tree.get_assignment(node.parent)\n old_index, old_state = old_assignment\n subtree = node.detach()\n\n backward_likelihood = tree.log_prob_assignment(old_assignment)\n logging.debug(\"Backward Likelihood: %f\" % backward_likelihood)\n\n points = set()\n if len(tree.constraints) > 0:\n points = subtree.points()\n\n (assignment, forward_likelihood) = tree.sample_assignment(constraints=tree.constraints,\n points=points,\n state=old_state)\n logging.debug(\"Candidate assignment: %s\", str(assignment))\n (index, state) = assignment\n\n tree.assign_node(subtree, assignment)\n new_likelihood = tree.marg_log_likelihood()\n\n logging.debug(\"New Marginal Likelihood: %f\" % new_likelihood)\n logging.debug(\"New Cost: %f\" % tree.cost())\n logging.debug(\"Forward Likelihood: %f\" % forward_likelihood)\n\n a = min(1, np.exp(new_likelihood + backward_likelihood 
- old_likelihood - forward_likelihood))\n if np.random.random() < a:\n logging.debug(\"Accepted new tree with probability: %f\" % a)\n self.tree = tree\n return\n logging.debug(\"Rejected new tree with probability: %f\" % a)\n\n def sample(self):\n self.tree = self.tree.copy()\n self.parent_move()\n self.likelihoods.append(self.tree.marg_log_likelihood())\n" ]
[ [ "numpy.arange", "numpy.exp", "numpy.random.random" ] ]
flying-Yan/BCNN
[ "31ebb985cb1556b6f98aa71459ee74c3490bbe1d" ]
[ "models/cb_ResNet18.py" ]
[ "import torch\nimport torch.nn as nn\nfrom models.binarized_fun import * \n\nclass ResBlock_1(nn.Module):\n\n def __init__(self, inchannel):\n super(ResBlock_1, self).__init__()\n \n self.tanh = nn.Hardtanh(-1.3,1.3)\n \n self.conv1 = nn.Sequential(\n self.tanh,\n BinarizeConv2d(inchannel, inchannel, kernel_size = 3, stride = 1, padding = 1, bias = False),\n nn.BatchNorm2d(inchannel),\n self.tanh,\n BinarizeConv2d(inchannel, inchannel, kernel_size = 3, stride = 1, padding = 1, bias = False),\n nn.BatchNorm2d(inchannel),\n )\n \n def forward(self, x):\n \n y = self.conv1(x)\n y = x + y\n\n return y\n \nclass ResBlock_2(nn.Module):\n\n def __init__(self, inchannel, outchannel):\n super(ResBlock_2, self).__init__()\n \n \n self.tanh = nn.Hardtanh(-1.3,1.3)\n \n self.conv1 = nn.Sequential(\n self.tanh,\n BinarizeConv2d(inchannel, outchannel, kernel_size = 3, stride = 2, padding = 1, bias = False),\n nn.BatchNorm2d(outchannel),\n self.tanh,\n BinarizeConv2d(outchannel, outchannel, kernel_size = 3, stride = 1, padding = 1, bias = False),\n nn.BatchNorm2d(outchannel),\n )\n self.shortcut = nn.Sequential(\n self.tanh,\n BinarizeConv2d(inchannel, outchannel, kernel_size = 1, stride = 2, padding = 0, bias = False),\n nn.BatchNorm2d(outchannel),\n )\n\n \n def forward(self, x):\n \n y = self.conv1(x)\n x = self.shortcut(x)\n y = x + y\n\n return y\n \nclass Net(nn.Module):\n\n def __init__(self, num_classes=10):\n super(Net, self).__init__()\n \n self.tanh = nn.Hardtanh(-1.3,1.3,inplace=True)\n\n self.conv1 = nn.Sequential(\n nn.BatchNorm2d(3, affine = False),\n nn.Conv2d(3, 64, kernel_size = 3, stride = 1, padding = 1, bias = False),\n nn.BatchNorm2d(64),\n )\n\n \n self.block_1_1 = ResBlock_1(64)\n self.block_1_2 = ResBlock_1(64)\n \n self.block_2_1 = ResBlock_2(64,128)\n self.block_2_2 = ResBlock_1(128)\n\n self.block_3_1 = ResBlock_2(128,256)\n self.block_3_2 = ResBlock_1(256)\n\n self.block_4_1 = ResBlock_2(256,512)\n self.block_4_2 = ResBlock_1(512)\n\n self.fc = nn.Sequential(\n nn.Linear(512,10)\n )\n\n self.avg = nn.Sequential(\n nn.ReLU(),\n nn.AvgPool2d(4,4),\n )\n \n self.regime = {\n 0: {'optimizer': 'Adam', 'betas': (0.9, 0.999),'lr': 5e-3},\n 80: {'lr': 1e-3},\n 150: {'lr': 5e-4},\n 200: {'lr': 1e-4},\n 240: {'lr': 5e-5},\n 270: {'lr': 1e-5}\n }\n\n \n \n def forward(self, x):\n \n x = self.conv1(x) \n \n x = self.block_1_1(x)\n x = self.block_1_2(x)\n\n x = self.block_2_1(x)\n x = self.block_2_2(x)\n\n x = self.block_3_1(x)\n x = self.block_3_2(x)\n\n x = self.block_4_1(x)\n x = self.block_4_2(x)\n\n x = self.avg(x)\n\n x = x.view(x.size(0),-1)\n x = self.fc(x)\n\n return x\n\n\n\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Hardtanh" ] ]
TauferLab/SOMOSPIE
[ "512bfc1a287d014f3c3d885a22b23825ce536c92" ]
[ "deprecated/hyppo_testing/hypppo2.py" ]
[ "#!/usr/bin/env python3\n\n# Code by Travis Johnston, 2017.\n# Modified and parallelized by Danny Rorabaugh, 2018/9.\n# HYbrid Parallel Piecewise POlynomial.\n\n\nimport argparse, csv, random\nimport numpy as np\n### https://docs.python.org/3.1/library/itertools.html#itertools.combinations_with_replacement\nfrom itertools import combinations_with_replacement as cwr \nfrom time import time\nimport os\n\n\n# Parallelization.\ndef init_parallel():\n print(f\"There are {os.cpu_count()} cores, of which {len(os.sched_getaffinity(0))} are available.\")\n\n import findspark\n findspark.init()\n # https://spark.apache.org/docs/0.9.0/api/pyspark/\n global SC\n from pyspark import SparkContext as SC\n #sc = SC.getOrCreate()\n #data = sc.parallelize(range(10))\n #print(data.collect())\n #sc.close()\n\n\n### This function expects a list of coefficients for the polynomial in order: \n### This function expects the degree of the polynomial (integer).\n### This function expects a point (list of floats) of where to evaluate the polynomial.\n### This function returns the value of the polynomial evaluated at the point provided.\ndef evaluate_polynomial(coefficients, degree, point):\n if degree == 0:\n return coefficients[0]\n \n monomials = [ np.product(x) for x in cwr([1.0] + point, degree) ]\n return sum( [ a[0]*a[1] for a in zip(coefficients, monomials) ] )\n\n\n### independent_variable_points is a list of settings for the independent variables that were observed.\n### dependent_variable_values is a list of observed values of the dependent variable.\n### It is important that for each i the result of independent_variable_points[i] is stored as dependent_variable_values[i].\n### degree is the degree of the polynomial to build.\n### This function returns the list of coefficients of the best fit polynomial surface of degree \"degree\".\ndef determine_coefficients(independent_variable_points, dependent_variable_values, degree):\n \n ### If degree==0, cwr returns [()], then np.product[()] is 1.0.\n A = [ [np.product(x) for x in cwr([1.0] + iv, degree)] for iv in independent_variable_points ]\n Z = np.array(dependent_variable_values)\n \n #### https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html\n #### TODO: Add logging of if solution is over-/under-determined, and error in latter case.\n coef = np.linalg.lstsq(A, Z, rcond=None)[0]\n return list( coef )\n\n### data_points is a list of the observed independent variable settings.\n### specific_points is one chosen setting of the independent variables.\n### k is the number of nearest neighbors to find.\n### scale indicates how the coordinates can/should be re-scaled before the distance metric is computed.\n### For example, if the points are of the form (x, y) and x's are measured in 1's and y's are measured by 100's.\n### Then, it may be desirable to multiply the x-values by 100 to bring them onto the same scale as the y-values.\n### To do this, set scale=[100, 1]. 
The default for CONUS is [1, 2],\n### since each degree latitude (y step) is about twice as long as each degree longitude (x step).\n### This function returns a list of indices (in data_points) of the k nearest neighbors.\ndef indices_of_kNN_ndim(data_points, specific_point, k, scale=[], norm=2):\n if len(data_points)==0:\n print(\"Error: data_points empty!\")\n return False\n if len(data_points[0])!=len(specific_point):\n print(\"Error: specific_point not same dim as elements of data_points!\")\n return False\n\n if scale:\n if len(scale)!=len(specific_point):\n print(\"Error: scale specified, but of different length then data!\")\n return False\n scale = np.array(scale)\n data_points = [ np.array(x)*scale for x in data_points ]\n specific_point = np.array(specific_point)*scale\n \n distances = [ sum( (x - specific_point)**norm ) for x in data_points ]\n indices = np.argsort( distances, kind='mergesort' )[:k]\n return indices\n\n\n### Find the standard deviation of all columns of df\ndef compute_scale(df):\n return [col.std(ddof=0) for col in df.columns]\n\n\n### independent_data_points is a list of the observed independent variables to build models from.\n### dependent_data_points is a list of the observed dependent variables (in the same order).\n### k is the number of folds or partitions to divide the data into.\n### num_random_partitions is the number of times the data is randomly partitioned (for averaging over many runs).\n### D is a strict upper bound on degree.\ndef kfold_crossvalidation(independent_data_points, dependent_data_points, k, num_random_partitions, D):\n \n n = len(independent_data_points) ### Number of data points.\n \n ### A list of 0's of same length as possible degrees.\n Total_SSE = [0]*D\n \n indices = list(range(n))\n \n for iteration in range(num_random_partitions):\n ### Randomly partition the data into k sets as equally sized as possible.\n\n ### Get a new random shuffling of the indices.\n random.shuffle(indices)\n Folds = [ [indices[i] for i in range(fold, n, k)] for fold in range(k) ]\n\n for d in range(D):\n\n ### Build k models of degree d (each model reserves one set as testing set).\n for testing_fold in range(k):\n testing_independent_data = [ independent_data_points[i] for i in Folds[testing_fold] ]\n testing_dependent_data = [ dependent_data_points[i] for i in Folds[testing_fold] ]\n \n model_independent_data = []\n model_dependent_data = []\n for fold in range(k):\n if fold != testing_fold:\n model_independent_data += [ independent_data_points[i] for i in Folds[fold] ]\n model_dependent_data += [ dependent_data_points[i] for i in Folds[fold] ]\n \n ### Get the polynomial built from the model data of degree d.\n try:\n coefficients = determine_coefficients( model_independent_data, model_dependent_data, d )\n \n ### Predict the testing points and add the error to the Total_SSE[d].\n for x, z in zip(testing_independent_data, testing_dependent_data):\n ### The square of the difference between polynomial prediction and observed value (z) at x.\n Total_SSE[d] += (evaluate_polynomial(coefficients, d, x) - z)**2 \n #print(f\"d: {d}; Total_SSA[d]: {Total_SSE[d]}; \\ncoefficients: {coefficients}\\n\")\n\n except:\n Total_SSE[d] += 99999999999 ### Basically, this d was too big.\n\n ### Return index of minimum Total_SSE.\n ### Note: Total_SSE[i] corresponds to polynomial of degree i.\n winning_degree = Total_SSE.index(min(Total_SSE))\n #print(f\"n: {n}; D: {D}; winning_degree: {winning_degree}; \\nTotal_SSE: {Total_SSE}\\n\")\n \n print(f\"Total_SSE: 
{Total_SSE}\")\n return [winning_degree] + Total_SSE\n\n\n### Ideal for small sample sizes\ndef leave_one_out_crossvalidation(independent_data_points, dependent_data_points, D):\n return kfold_crossvalidation(independent_data_points, dependent_data_points, len(independent_data_points), 1, D)\n\n\n### Main function for a single data point.\n### This is the function that will be called independently many time.\n### If this can be run on every element of a Spark RDD, we're golden.\ndef model_at_point(x, Independent_Data, Dependent_Data, args):#K, maxDegree, model=\"HYPPO\", norm=2):\n ##K, maxDegree, model, norm)\n \n K = args.k\n maxDegree = args.degree\n model = args.model\n norm = args.norm\n\n ### Find Nearest neighbors\n indices_of_nearest_neighbors = indices_of_kNN_ndim(Independent_Data, x, K, norm=norm)\n\n ### Select the data associated with the nearest neighbors for use with modeling\n selected_independent_data = [ Independent_Data[i] for i in indices_of_nearest_neighbors ]\n selected_dependent_data = [ Dependent_Data[i] for i in indices_of_nearest_neighbors ]\n\n ### Determine the best polynomial degree\n if model == \"KNN\":\n ### Setting the degree to 0 forces us to just average the nearest neighbors.\n ### This is exactly kNN (a degree 0 polynomial).\n degree = 0\n degree_errors = []\n \n elif model == \"SBM\":\n degree_errors = kfold_crossvalidation(selected_independent_data, selected_dependent_data, K, 10, D)\n degree = degree_errors.pop(0)\n \n elif model==\"HYPPO\":\n D = maxDegree + 1\n degree_errors = leave_one_out_crossvalidation(selected_independent_data, selected_dependent_data, D)\n degree = degree_errors.pop(0)\n\n else:\n raise ValueError(f\"\\\"{model}\\\" is not a valid model.\")\n\n ### Compute the coefficients of the \"best\" polynomial of degree degree.\n coefficients = determine_coefficients(selected_independent_data, selected_dependent_data, degree)\n #print(f\"coefficients: {coefficients}\")\n \n ### Using the surface, predict the value of the point.\n z = evaluate_polynomial(coefficients, degree, x)\n \n #if degree > 0:\n # print(f\"x: {x}; \\nindices_of_nearest_neighbors: {indices_of_nearest_neighbors}; \\ndegree: {degree}; coefficients: {coefficients}; \\nz: {z}\\n\")\n return [z, degree] + degree_errors\n\n\n### input1 and input2 are arrays or ndarrays.\n### Columns index 0 and 1 of input1 and input2 are the x/y-coordinates.\n### input1 should have 1 more column than input2, the column with the dependent variable.\n### depIndex is the index of the dependent variable column in input1.\n### model is one of [\"HYPPO\", \"KNN\", \"SBM\"].\n### Implementations of HYPPO and SBM are not well-suited for high dimensional data.\n### k is the number of nearest neighbors for HYPPO or KNN (is overridden for SBM).\ndef main(input1, input2, args):#depIndex=2, model=\"HYPPO\", maxDegree=4, k=6, indepStart=0, indepCount=2, scale=[], norm=2, parallel=False):\n\n #depIndex=args.depIndex\n #model=args.model\n #maxDegree=args.degree\n indepStart=args.skipVars\n indepCount=args.variables\n #parallel=args.parallel\n scale=args.scale\n #norm=args.norm\n\n Independent_Data = []\n Dependent_Data = []\n for line in input1:\n numbers = list(line)\n Dependent_Data.append(numbers.pop(args.depIndex))\n Independent_Data.append(np.array(numbers[indepStart:indepStart+indepCount]))\n #Coordinate_Data.append(np.array(numbers[:2]))\n\n if scale:\n if len(scale)!=indepCount:\n print(\"Error: scale was specified, but isn't the same length as the sepcified number of independent variables!\")\n 
scale = np.array(scale)\n else:\n scale = 1/np.std(Independent_Data, axis=0)\n print(scale)\n\n print(f\"Dependent_Data is an array of length {len(Dependent_Data)} with first elements:\\n{Dependent_Data[:5]}\\n\")\n print(f\"Independent_Data is a length-{len(Independent_Data)} array of arrays with first element:\\n{Independent_Data[0]}\\n\") \n \n Independent_Data = [row*scale for row in Independent_Data]\n print(f\"Independent_Data post-scaling is an array of arrays with first element:\\n{Independent_Data[0]}\\n\") \n \n ### Set K, the number of nearest neighbors to use when building the model.\n if args.model == \"SBM\":\n args.k = len(Dependent_Data) - 1\n print(f\"Each local model will be generated with {args.k} nearest neighbors.\\n\")\n\n K = args.k\n \n t0 = time()\n \n def MaP(x):\n a = x[0]\n b = x[1]\n x = np.array(x[indepStart:indepStart+indepCount])*scale\n local_model = model_at_point(x, Independent_Data, Dependent_Data, args)#K, maxDegree, model, norm)\n \n return [a, b] + local_model\n \n if args.parallel: \n init_parallel()\n sc = SC.getOrCreate()\n data = sc.parallelize(input2)\n data = data.map(MaP)\n output = data.collect()\n sc.stop()\n \n else:\n output = [MaP(x) for x in input2]\n \n print(f\"It took {time() - t0} seconds to perform model_at_point on all the evaluation points.\")\n \n return output\n\n\n'''\nargs_fileName = \"test.csv\"\nargs_delimiter = \",\"\nargs_headerRows = 1\nargs_eval = \"eval.csv\"\nargs_depIndex = 2\nargs_model = \"HYPPO\"\nargs_k = 6\nargs_variables = 2\nargs_skipVars = 0\nargs_scale = '1,2'\nargs_out = f\"output{args_variables}d-{args_k}.csv\"\nargs_parallel = True\n'''\n\nif __name__ == \"__main__\": \n parser = argparse.ArgumentParser()\n parser.add_argument( \"fileName\", help=\"The path to the csv file containing the training data.\")\n parser.add_argument( \"-m\", \"--model\", help=\"The type of model to build.\", choices=[\"HYPPO\", \"KNN\", \"SBM\"], default=\"HYPPO\")\n parser.add_argument( \"-k\", \"--k\", help=\"The number of nearest neighbors to use for either the KNN or HYPPO model.\", type=int, default=6)\n parser.add_argument( \"-e\", \"--eval\", help=\"Name of file where the evaluation points are stored.\")\n parser.add_argument( \"-o\", \"--out\", help=\"Name of file where prediction is to be stored.\")\n parser.add_argument( \"-i\", \"--depIndex\", help=\"Index of column in fileName with dependent variable to be tested for building a model.\", type=int, default=2)\n parser.add_argument( \"-r\", \"--headerRows\", help=\"Number of rows to ignore, being header row(s).\", type=int, default=1)\n parser.add_argument( \"-d\", \"--delimiter\", help=\"Delimiter of fileName and eval.\", default=\",\")\n parser.add_argument( \"-D\", \"--degree\", help=\"Maximum polynomial degree.\", type=int, default=4)\n parser.add_argument( \"-v\", \"--variables\", help=\"Number of independent variables; if unspecified, will use only first two columns in the file.\", type=int, default=2)\n parser.add_argument( \"-s\", \"--skipVars\", help=\"Number of independent variables to skip; e.g., 2 if you don't wish to use lon/lat.\", type=int, default=0)\n parser.add_argument( \"-S\", \"--scale\", help=\"Specify the scale to multiply your independent variables by; for example -s0 -v2 -S1,2. 
Uses reciprocals of standard deviations if unspecified.\")\n parser.add_argument( \"-N\", \"--norm\", help=\"Specify N for l_N norm; default is 2 (Euclidean).\", type=int, default=2)\n parser.add_argument( \"-p\", \"--parallel\", help=\"1 to run in parallel with Spark; 0 otherwise (the default).\", type=int, default=0)\n args=parser.parse_args()\n\n ### args.fileName contains the data from which to build the model.\n ### It is expected that the file be comma separated and have a header row.\n ### Default format is x, y, z, c1, ..., cm.\n ### Where x and y are geographic coordinates, z is the observed dependent variable, \n ### and c1, ..., cm are additional independent variables.\n ### args.eval should be the same format, but lacking the z column.\n \n ### Commandline test\n ### ./hypppo2.py ../data/2012/t-postproc/8.5.csv -e ../data/2012/e-postproc/8.5.csv -o out2.csv -v3 -s2 -p1 -D4 -k9\n \nif args.scale:\n args.scale = [float(s) for s in args.scale.split(',')]\nelse:\n args.scale = []\n\noriginal_values = np.loadtxt(args.fileName, delimiter=args.delimiter, skiprows=args.headerRows)\nprint(f\"\\n{len(original_values)} lines of original data have been loaded from {args.fileName}.\\n\")\n\nvalues_to_model = np.loadtxt(args.eval, delimiter=args.delimiter, skiprows=args.headerRows)\nprint(f\"\\n{len(values_to_model)} lines of evaluation data have been loaded from {args.eval}.\\n\")\n\noutput = main(original_values, values_to_model, args)#depIndex=args.depIndex, model=args.model, maxDegree=args.degree, \n #k=args.k, indepStart=args.skipVars, indepCount=args.variables, parallel=args.parallel, scale=args.scale, norm=args.norm)\n\nnp.savetxt(args.out, output, delimiter=\",\", fmt='%f')\n" ]
[ [ "numpy.product", "numpy.linalg.lstsq", "numpy.std", "numpy.savetxt", "numpy.argsort", "numpy.array", "numpy.loadtxt" ] ]
Stargrazer82301/CAAPR
[ "4adead7dd85072cf14e2afb0f6b99b4f92d34201" ]
[ "CAAPR/CAAPR_Main.py" ]
[ "# Import smorgasbord\r\nimport sys\r\nimport os\r\nimport gc\r\nimport time\r\nimport random\r\n#import warnings\r\n#warnings.filterwarnings('ignore')\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport multiprocessing as mp\r\nimport CAAPR\r\nimport CAAPR.CAAPR_IO\r\nimport CAAPR.CAAPR_Pipeline\r\nimport pdb\r\n\r\n\r\n\r\n\r\n# Define the function that runs the CAAPR pipeline\r\ndef Run(bands_table_path = '../CAAPR_Example/CAAPR_Band_Table.csv',\r\n sources_table_path = '../CAAPR_Example/CAAPR_Source_Table.csv',\r\n output_dir_path = os.path.join(os.getcwd(),'CAAPR_Output'),\r\n temp_dir_path = os.path.join(os.getcwd(),'CAAPR_Temp'),\r\n fit_apertures = True,\r\n aperture_table_path = None,#'CAAPR_Aperture_Table.csv',\r\n photom_table_path = None,\r\n expansion_factor = 1.25,\r\n polysub = True,\r\n starsub = True,\r\n do_photom = True,\r\n extinction_corr = True,\r\n parallel = True,\r\n n_proc = mp.cpu_count()-2,\r\n thumbnails = True,\r\n debug = False,\r\n verbose = True,\r\n messy = False\r\n ):\r\n\r\n\r\n\r\n # Create dictionary of kwarg values\r\n kwargs_dict = {'sources_table_path':sources_table_path,\r\n 'bands_table_path':bands_table_path,\r\n 'output_dir_path':output_dir_path,\r\n 'temp_dir_path':temp_dir_path,\r\n 'fit_apertures':fit_apertures,\r\n 'aperture_table_path':aperture_table_path,\r\n 'photom_table_path':photom_table_path,\r\n 'expansion_factor':expansion_factor,\r\n 'polysub':polysub,\r\n 'starsub':starsub,\r\n 'do_photom':do_photom,\r\n 'extinction_corr':extinction_corr,\r\n 'parallel':parallel,\r\n 'n_proc':n_proc,\r\n 'thumbnails':thumbnails,\r\n 'debug':debug,\r\n 'verbose':verbose,\r\n 'messy':messy}\r\n\r\n\r\n\r\n # Read in sources table, and convert into dictionary\r\n sources_dict = CAAPR.CAAPR_IO.SourcesDictFromCSV(sources_table_path)\r\n\r\n # Read in bands table, and convert into dictionary\r\n bands_dict = CAAPR.CAAPR_IO.BandsDictFromCSV(bands_table_path)\r\n\r\n # Prepare output directory\r\n CAAPR.CAAPR_IO.OutputDirPrepare(kwargs_dict)\r\n\r\n # Prepare temp directory, deleting any pre-existing directory at the specified location\r\n CAAPR.CAAPR_IO.TempDirPrepare(kwargs_dict)\r\n\r\n\r\n\r\n # Make inviolate copy of original band directories, to insure against over-writing when temp cutout directories are handled later\r\n for band in bands_dict.keys():\r\n bands_dict[band]['band_dir_inviolate'] = bands_dict[band]['band_dir']\r\n\r\n # Record timestamp\r\n kwargs_dict['timestamp'] = str(time.time()).replace('.','-')\r\n\r\n\r\n\r\n # If no aperture table file provided, and aperture-fitting is requested, create and prepare CSV file to store aperture dimensions for each source\r\n kwargs_dict = CAAPR.CAAPR_IO.ApertureTablePrepare(kwargs_dict)\r\n\r\n # If no photometry table path provided, and photometry is requested, create and prepare CSV file to store photometry output for each source\r\n kwargs_dict = CAAPR.CAAPR_IO.PhotomTablePrepare(kwargs_dict)\r\n\r\n\r\n\r\n\r\n # Randomise order of source dictionary keys (to \"smooth out\" average system resource usage)\r\n source_dict_keys = sources_dict.keys()\r\n random.shuffle(source_dict_keys)\r\n\r\n # Loop over each target source, processing in turn\r\n time_list = [time.time()]\r\n if verbose: print('[CAAPR] '+str(len(source_dict_keys))+' target objects to be processed.')\r\n for source in source_dict_keys:\r\n source_dict = sources_dict[source]\r\n CAAPR.CAAPR_Pipeline.PipelineMain(source_dict, bands_dict, kwargs_dict)\r\n\r\n # Estimate time until completions, and collect garbage\r\n 
CAAPR.CAAPR_Pipeline.TimeEst(time_list, len(source_dict_keys), output_dir_path, source_dict, kwargs_dict)\r\n gc.collect()\r\n\r\n\r\n\r\n\r\n\r\n# Commence main task; generally you want to be calling CAAPR as a function, but it's useful to initiate a run this way for development and testing\r\nif __name__ == \"__main__\":\r\n\r\n # Set parameters, and run function\r\n testing = True\r\n parallel = False\r\n starsub = True\r\n fit_apertures = True\r\n if fit_apertures==True:\r\n aperture_table_path = None\r\n elif fit_apertures==False:\r\n aperture_table_path = '../DustPedia/CAAPR_Aperture_Table_Test.csv'\r\n if testing:\r\n Run(temp_dir_path='/home/saruman/spx7cjc/DustPedia/CAAPR_Temp',\r\n n_proc=4,\r\n sources_table_path='../DustPedia/CAAPR_Source_Table_Test.csv',\r\n starsub=starsub,\r\n fit_apertures=fit_apertures,\r\n do_photom=False,\r\n aperture_table_path=aperture_table_path,\r\n parallel=parallel,\r\n debug=False,\r\n thumbnails=True)\r\n\r\n # Jubilate\r\n print('All done!')" ]
[ [ "matplotlib.use" ] ]
HXPRedBlue/mmocr
[ "914613d53484712be67c38d50bb902a218884b24" ]
[ "mmocr/apis/inference.py" ]
[ "import numpy as np\nimport torch\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\nfrom mmdet.datasets import replace_ImageToTensor\nfrom mmdet.datasets.pipelines import Compose\n\n\ndef disable_text_recog_aug_test(cfg, set_types=None):\n \"\"\"Remove aug_test from test pipeline of text recognition.\n Args:\n cfg (mmcv.Config): Input config.\n set_types (list[str]): Type of dataset source. Should be\n None or sublist of ['test', 'val']\n\n Returns:\n cfg (mmcv.Config): Output config removing\n `MultiRotateAugOCR` in test pipeline.\n \"\"\"\n assert set_types is None or isinstance(set_types, list)\n if set_types is None:\n set_types = ['val', 'test']\n for set_type in set_types:\n if cfg.data[set_type].pipeline[1].type == 'MultiRotateAugOCR':\n cfg.data[set_type].pipeline = [\n cfg.data[set_type].pipeline[0],\n *cfg.data[set_type].pipeline[1].transforms\n ]\n assert_if_not_support_batch_mode(cfg, set_type)\n\n return cfg\n\n\ndef assert_if_not_support_batch_mode(cfg, set_type='test'):\n if cfg.data[set_type].pipeline[1].type == 'ResizeOCR':\n if cfg.data[set_type].pipeline[1].max_width is None:\n raise Exception('Batch mode is not supported '\n 'since the image width is not fixed, '\n 'in the case that keeping aspect ratio but '\n 'max_width is none when do resize.')\n\n\ndef model_inference(model, imgs, batch_mode=False):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):\n Either image files or loaded images.\n batch_mode (bool): If True, use batch mode for inference.\n Returns:\n result (dict): Predicted results.\n \"\"\"\n\n if isinstance(imgs, (list, tuple)):\n is_batch = True\n if not isinstance(imgs[0], (np.ndarray, str)):\n raise AssertionError('imgs must be strings or numpy arrays')\n\n elif isinstance(imgs, (np.ndarray, str)):\n imgs = [imgs]\n is_batch = False\n else:\n raise AssertionError('imgs must be strings or numpy arrays')\n\n is_ndarray = isinstance(imgs[0], np.ndarray)\n\n cfg = model.cfg\n\n if batch_mode:\n cfg = disable_text_recog_aug_test(cfg, set_types=['test'])\n\n device = next(model.parameters()).device # model device\n\n if is_ndarray:\n cfg = cfg.copy()\n # set loading pipeline type\n cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray'\n\n cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n test_pipeline = Compose(cfg.data.test.pipeline)\n\n datas = []\n for img in imgs:\n # prepare data\n if is_ndarray:\n # directly add img\n data = dict(img=img)\n else:\n # add information into dict\n data = dict(img_info=dict(filename=img), img_prefix=None)\n\n # build the data pipeline\n data = test_pipeline(data)\n # get tensor from list to stack for batch mode (text detection)\n if batch_mode:\n if cfg.data.test.pipeline[1].type == 'MultiScaleFlipAug':\n for key, value in data.items():\n data[key] = value[0]\n datas.append(data)\n\n if isinstance(datas[0]['img'], list) and len(datas) > 1:\n raise Exception('aug test does not support '\n f'inference with batch size '\n f'{len(datas)}')\n\n data = collate(datas, samples_per_gpu=len(imgs))\n\n # process img_metas\n if isinstance(data['img_metas'], list):\n data['img_metas'] = [\n img_metas.data[0] for img_metas in data['img_metas']\n ]\n else:\n data['img_metas'] = data['img_metas'].data\n\n if isinstance(data['img'], list):\n data['img'] = [img.data for img in data['img']]\n if isinstance(data['img'][0], list):\n data['img'] = [img[0] for img in data['img']]\n 
else:\n data['img'] = data['img'].data\n\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n else:\n for m in model.modules():\n assert not isinstance(\n m, RoIPool\n ), 'CPU inference with RoIPool is not supported currently.'\n\n # forward the model\n with torch.no_grad():\n results = model(return_loss=False, rescale=True, **data)\n\n if not is_batch:\n return results[0]\n else:\n return results\n\n\ndef text_model_inference(model, input_sentence):\n \"\"\"Inference text(s) with the entity recognizer.\n\n Args:\n model (nn.Module): The loaded recognizer.\n input_sentence (str): A text entered by the user.\n\n Returns:\n result (dict): Predicted results.\n \"\"\"\n\n assert isinstance(input_sentence, str)\n\n cfg = model.cfg\n test_pipeline = Compose(cfg.data.test.pipeline)\n data = {'text': input_sentence, 'label': {}}\n\n # build the data pipeline\n data = test_pipeline(data)\n if isinstance(data['img_metas'], dict):\n img_metas = data['img_metas']\n else:\n img_metas = data['img_metas'].data\n\n assert isinstance(img_metas, dict)\n img_metas = {\n 'input_ids': img_metas['input_ids'].unsqueeze(0),\n 'attention_masks': img_metas['attention_masks'].unsqueeze(0),\n 'token_type_ids': img_metas['token_type_ids'].unsqueeze(0),\n 'labels': img_metas['labels'].unsqueeze(0)\n }\n # forward the model\n with torch.no_grad():\n result = model(None, img_metas, return_loss=False)\n return result\n" ]
[ [ "torch.no_grad" ] ]
rileydr/SSFC-Data
[ "b9169dfe47c939f4c7a49d9e53e05ba90d066301" ]
[ "code/utilities/densmore_v3.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\n\nimport knockknock\nkk_url = \"https://hooks.slack.com/services/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\"\nkk_channel_name = 'jupyter-notebook'\nkk_users = ['@rileyrobertsond']\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef sum_stats(dataframe, filename_string, y_variable):\n \n import sys\n with open(f'{filename_string}', 'w') as f:\n\n ## COLUMN HEADERS \n print('index' ,',', # 0\n 'feature' ,',', #\n 'dtype' ,',', # \n 'non-nulls' ,',', # 'alias: col_count'\n 'nulls' ,',', #\n 'pct_nulls' ,',', # 5\n 'unique' ,',', #\n 'mode' ,',', #\n 'mode_count',',', #\n 'min' ,',', #\n 'q1' ,',', # 10\n 'median' ,',', #\n 'q3' ,',', #\n 'max' ,',', # \n 'mean' ,',', #\n 'stdev' ,',', # 15\n 'var' ,',', #\n 'skew' ,',', #\n 'kurtosis' ,',', #\n 'y_corr', #\n file=f\n )\n col_index = -1 # for index column, starting the numbering at -1 so first row is 0\n \n ## VARIABLE ASSIGNMENTS (DETERMINED BY DATATYPE) \n for (columnName, columnData) in dataframe.iteritems():\n ## OBJECTS\n if columnData.dtype == object or columnData.dtype == str: \n col_index += 1 # 0\n col_name = columnName #\n col_dtype = columnData.dtype #\n col_count = columnData.count() #non-nulls #\n col_nulls = columnData.isnull().sum() #\n col_pct_nulls = round((columnData.isnull().sum())/len(columnData),2) # 5\n col_unique = columnData.nunique() # \n col_mode = list(columnData.value_counts().items())[0][0] #\n col_mode_count = columnData.value_counts().max() #\n \n col_min = '' #\n col_q1 = '' # 10\n col_median = '' #\n col_q3 = '' #\n col_max = '' #\n \n col_mean = '' #\n col_stdev = '' # 15\n col_var = '' #\n col_skew = '' # \n col_kurt = '' #\n col_y_corr = '' #\n \n ## NUMERICS\n else: \n col_index += 1 # 0\n col_name = columnName # \n col_dtype = columnData.dtype #\n col_count = columnData.count() #non-nulls # \n col_nulls = columnData.isnull().sum() #\n col_pct_nulls = round((columnData.isnull().sum())/len(columnData),2) # 5\n col_unique = columnData.nunique() # \n col_mode = columnData.mode()[0] #\n col_mode_count = columnData.value_counts().max() # \n \n col_min = columnData.min() #\n col_q1 = columnData.quantile(.25) # 10\n col_median = columnData.median() #\n col_q3 = columnData.quantile(.75) # \n col_max = columnData.max() #\n \n col_mean = columnData.mean() #\n col_stdev = columnData.std() # 15\n col_var = columnData.var() #\n col_skew = columnData.skew() #\n col_kurt = columnData.kurtosis() #\n try:\n col_y_corr = columnData.corr(dataframe[y_variable]) #\n except:\n col_y_corr = ''\n\n ## PRINT VARIABLES\n print(col_index ,',', # 0\n col_name ,',', #\n col_dtype ,',', #\n col_count ,',', #\n col_nulls ,',', #\n col_pct_nulls ,',', # 5\n col_unique ,',', #\n col_mode ,',', #\n col_mode_count ,',', #\n col_min ,',', #\n col_q1 ,',', # 10 \n col_median ,',', #\n col_q3 ,',', # \n col_max ,',', # \n col_mean ,',', #\n col_stdev ,',', # 15\n col_var ,',', #\n col_skew ,',', #\n col_kurt ,',', #\n col_y_corr, #\n file=f\n )\n\n### VALUE COUNT DISPLAYS\n\ndef val_counts(dataframe):\n\n for (columnName, columnData) in dataframe.iteritems():\n\n print(f'Column Name: {columnName}')\n print(f'Unique Values: 
{dataframe[columnName].nunique()}')\n print('')\n print(dataframe[columnName].value_counts())\n print('')\n print('_________________________')\n print('')\n print('')\n\n\ndef all_uniques(dataframe):\n\n for (columnName, columnData) in dataframe.iteritems():\n\n print(f'Column Name: {columnName}')\n print(f'Unique Values: {dataframe[columnName].nunique()}')\n print(f'Unique Values: {np.sort(columnData.unique())}')\n print('')\n print(dataframe[columnName].value_counts())\n print('_________________________')\n print('')\n print('')\n\n### PLOTS\n\ndef rapid_plots(dataframe, y_var, min_corr, plt_style='seaborn-darkgrid'):\n plt.style.use(plt_style) # removable or customizable\n\n plot_list = [] \n\n for (columnName, columnData) in dataframe.iteritems():\n if columnData.dtype != object:\n if (abs(columnData.corr(dataframe[y_var])) > min_corr) and columnData.corr(dataframe[y_var]) != 1:\n plt.figure()\n viz = sns.regplot(x=dataframe[columnName], y=dataframe[y_var], color='steelblue')\n plt.title(f'{columnName}: {round(columnData.corr(dataframe[y_var]), 5)}')\n plot_list.append(viz)\n else:\n pass\n else:\n pass\n \n return plot_list\n\ndef rapid_dists(dataframe, columns, bins=50, figsize=(8,3), plt_style='seaborn-darkgrid'):\n plt.style.use(plt_style) # removable or customizable\n\n plot_list = [] \n\n for column in columns:\n plt.figure(figsize=(figsize))\n dataframe[column].plot(kind='hist', bins=bins)\n plt.title(column)\n plt.show()\n \n return None\n\n\n### QUICK MODELS\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_knns(X, y, klist,random_state=74):\n for k in klist:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)\n sc = StandardScaler()\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(Z_train, y_train)\n print(f'𝑘 = {k}')\n print(f'Train Accuracy: {knn.score(Z_train, y_train)}')\n print(f' Test Accuracy: {knn.score(Z_test, y_test)}')\n print('')\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_logregsa_nlp(X_train, y_train, X_test, y_test, alist, penalty, solver='liblinear', random_state=74):\n for a in alist:\n sc = StandardScaler(with_mean=False)\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n logreg = LogisticRegression(penalty=penalty, C=(1/a), solver=solver, random_state=random_state)\n logreg.fit(Z_train, y_train)\n print(f'𝛼 = {a}')\n print(f'𝐶 = {1/a}')\n print(f'Train Accuracy: {logreg.score(Z_train, y_train)}')\n print(f' Test Accuracy: {logreg.score(Z_test, y_test)}')\n print('')\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_logregsa(X, y, alist, penalty, solver='liblinear', random_state=74):\n for a in alist:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)\n sc = StandardScaler()\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n logreg = LogisticRegression(penalty=penalty, C=(1/a), solver=solver, random_state=random_state)\n logreg.fit(Z_train, y_train)\n print(f'𝛼 = {a}')\n print(f'𝐶 = {1/a}')\n print(f'Train Accuracy: {logreg.score(Z_train, y_train)}')\n print(f' Test Accuracy: {logreg.score(Z_test, y_test)}')\n print('')\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_logregsa_coefs(X, y, alist, penalty, solver='liblinear', 
random_state=74):\n for a in alist:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)\n sc = StandardScaler()\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n logreg = LogisticRegression(penalty=penalty, C=(1/a), solver=solver, random_state=random_state)\n logreg.fit(Z_train, y_train)\n coefs = list(zip(X.columns, (list(np.exp(logreg.coef_)[0]))))\n print(f'𝛼 = {a}')\n print(f'𝐶 = {1/a}')\n print(f'Intercept: {logreg.intercept_[0]}')\n print(f'Coefficients:')\n for coef in coefs:\n print(coef)\n print('')\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_logregsc(X, y, clist, penalty, solver='liblinear', random_state=74):\n for c in clist:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)\n sc = StandardScaler()\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n logreg = LogisticRegression(penalty=penalty, C=c, solver=solver, random_state=random_state)\n logreg.fit(Z_train, y_train)\n print(f'𝐶 = {c}')\n print(f'Train Accuracy: {logreg.score(Z_train, y_train)}')\n print(f' Test Accuracy: {logreg.score(Z_test, y_test)}') \n print('')\n\[email protected]_sender(webhook_url=kk_url, channel=kk_channel_name, user_mentions=kk_users)\ndef quickmod_logregsc_coefs(X, y, clist, penalty, solver='liblinear', random_state=74):\n for c in clist:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)\n sc = StandardScaler()\n Z_train = sc.fit_transform(X_train)\n Z_test = sc.transform(X_test)\n logreg = LogisticRegression(penalty=penalty, C=c, solver=solver, random_state=random_state)\n logreg.fit(Z_train, y_train)\n coefs = list(zip(X.columns, (list(np.exp(logreg.coef_)[0]))))\n print(f'𝐶 = {c}')\n print(f'Intercept: {logreg.intercept_[0]}')\n print(f'Coefficients:')\n for coef in coefs:\n print(coef)\n print('')\n\n\n# print(f' Intercept: {logreg.intercept_[0]}')\n# print(f' Coefficients: {logreg.coef_[0]}')\n# print(f' Predictions: {logreg.predict(X_test)[:10]}') \n# print(f' Probabilities: {logreg.predict_proba(X_test)[:10]}') \n\n\n\ndef make_colormap(colors_list): \n from colour import Color\n from matplotlib.colors import LinearSegmentedColormap\n\n color_map = LinearSegmentedColormap.from_list( 'my_list', [ Color( c ).rgb for c in colors_list ] )\n plt.figure( figsize = (15,3))\n plt.imshow( [list(np.arange(0, len( colors_list ) , 0.1)) ] , interpolation='nearest', origin='lower', cmap= color_map )\n plt.xticks([])\n plt.yticks([])\n return color_map\n\n# custom_cmap = make_colormap( ['#ACD701','#69B636','#32A318'] ) " ]
[ [ "matplotlib.pyplot.yticks", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.title", "sklearn.model_selection.train_test_split", "sklearn.neighbors.KNeighborsClassifier", "sklearn.preprocessing.StandardScaler", "numpy.exp", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ] ]
yemen2016/FakeNewsDetection
[ "1caad62b068fb125f18c2f35299c36981a86ba55" ]
[ "ML Code/NB.py" ]
[ "from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import decomposition, ensemble\nfrom sklearn.model_selection import cross_validate\nimport pandas, xgboost, numpy, textblob, string\nfrom keras.preprocessing import text, sequence\nfrom keras import layers, models, optimizers\nimport pickle \nimport numpy\n# load the dataset\ndata = open('ManualAnnotatedFakeNewsDataset.txt').read()\n#data = open('AutomaticAnnotatedFakeNewsDataset.txt').read()\nlabels, texts = [], []\nfor i, line in enumerate(data.split(\"\\n\")):\n content = line.split(\"\\t\")\n labels.append(content[0])\n texts.append(\" \".join(content[1:]))\n#stemming\ndata1 = []\nfrom nltk import word_tokenize\n\nfrom nltk.stem.isri import ISRIStemmer\n\nst = ISRIStemmer()\nfor tx in texts:\n tweet = \"\"\n for a in word_tokenize(tx):\n tweet = tweet + st.stem(a)+ \" \"\n data1.append(tweet.strip())\n\n#print(data1[:10])\n#tashfeen\ndata2 = []\nimport pyarabic.arabrepr\narepr = pyarabic.arabrepr.ArabicRepr()\nrepr = arepr.repr\nfrom tashaphyne.stemming import ArabicLightStemmer\nArListem = ArabicLightStemmer()\nfor tx in texts:\n tweet = \"\"\n for a in word_tokenize(tx):\n stem = ArListem.light_stem(a)\n #tweet = tweet + ArListem.get_stem()+ \" \"\n tweet = tweet + ArListem.get_root()+ \" \"\n data2.append(tweet.strip())\n#print(data2[:10])\n\n# create a dataframe using texts and lables\ntrainDF = pandas.DataFrame()\ntrainDF['tweet'] = texts\ntrainDF['class'] = labels\n\n# split the dataset into training and validation datasets \ntrain_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['tweet'], trainDF['class'],test_size = 0.2)\n\n\n\n\n# create a count vectorizer object \ncount_vect = CountVectorizer(analyzer='word', token_pattern=r'\\w{1,}')\ncount_vect.fit(trainDF['tweet'])\n\n# transform the training and validation data using count vectorizer object\nxtrain_count = count_vect.transform(train_x)\nxvalid_count = count_vect.transform(valid_x)\n\n# word level tf-idf\ntfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\\w{1,}', max_features=20000)\ntfidf_vect.fit(trainDF['tweet'])\nxtrain_tfidf = tfidf_vect.transform(train_x)\nxvalid_tfidf = tfidf_vect.transform(valid_x)\n\n# ngram level tf-idf \ntfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\\w{1,}', ngram_range=(2,3), max_features=20000)\ntfidf_vect_ngram.fit(trainDF['tweet'])\nxtrain_tfidf_ngram = tfidf_vect_ngram.transform(train_x)\nxvalid_tfidf_ngram = tfidf_vect_ngram.transform(valid_x)\n\n# characters level tf-idf\ntfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\\w{1,}', ngram_range=(2,3), max_features=20000)\ntfidf_vect_ngram_chars.fit(trainDF['tweet'])\nxtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(train_x) \nxvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(valid_x) \n\ndef train_model(classifier, feature_vector_train, label, feature_vector_valid, modelName, is_neural_net=False):\n # fit the training dataset on the classifier\n clf = classifier.fit(feature_vector_train, label)\n with open(modelName, 'wb') as picklefile:\n pickle.dump(clf,picklefile)\n # predict the labels on validation dataset\n predictions = clf.predict(feature_vector_valid)\n #scores = cross_validate(clf, feature_vector_train, label, cv=10, scoring='f1_weighted')\n #print(scores)\n #for x in scores:\n #print(x)\n if is_neural_net:\n predictions = 
predictions.argmax(axis=-1)\n f = open('FakeNews/results.txt', 'a+')\n #return metrics.accuracy_score(valid_y, predictions)\n print(metrics.precision_score(valid_y, predictions, average='weighted'))\n f.write(str(metrics.precision_score(valid_y, predictions, average='weighted'))+\"\\t\")\n print(metrics.recall_score(valid_y, predictions, average='weighted'))\n f.write(str(metrics.recall_score(valid_y, predictions, average='weighted'))+\"\\t\")\n f.write(str(metrics.f1_score(valid_y, predictions, average='weighted'))+\"\\n\")\n f.close()\n return metrics.f1_score(valid_y, predictions, average='weighted')\n\n# Naive Bayes on Count Vectors\nNBmodelname = \"FakeNews/10CountVectors_NB_Model\"\naccuracy = train_model(naive_bayes.MultinomialNB(), xtrain_count, train_y, xvalid_count,NBmodelname)\nprint (\"NB, Count Vectors: \", accuracy)\n\n# Naive Bayes on Word Level TF IDF Vectors\nNBmodelname = \"FakeNews/11WordLevel_NB_Model\"\naccuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf, train_y, xvalid_tfidf,NBmodelname)\nprint (\"NB, WordLevel TF-IDF: \", accuracy)\n\n# Naive Bayes on Ngram Level TF IDF Vectors\nNBmodelname = \"FakeNews/12N-GramVectors_NB_Model\"\naccuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram,NBmodelname)\nprint (\"NB, N-Gram Vectors: \", accuracy)\n\n# Naive Bayes on Character Level TF IDF Vectors\nNBmodelname = \"FakeNews/13CharLevelVectors_NB_Model\"\naccuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars,NBmodelname)\nprint (\"NB, CharLevel Vectors: \", accuracy)\n\n\n" ]
[ [ "sklearn.naive_bayes.MultinomialNB", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
otanet/RLAS2021_chatbot_DeepRL_20220111
[ "96c6c15ebd0f0c30f6532c64212eca48d134ba6a" ]
[ "dqn_agent.py" ]
[ "from keras.models import Sequential\nfrom keras.layers import Dense\n# from keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import Adam\nimport random, copy\nimport numpy as np\nfrom dialogue_config import rule_requests, agent_actions\nimport re\n\n\n# Some of the code based off of https://jaromiru.com/2016/09/27/lets-make-a-dqn-theory/\n# Note: In original paper's code the epsilon is not annealed and annealing is not implemented in this code either\n\n\nclass DQNAgent:\n \"\"\"The DQN agent that interacts with the user.\"\"\"\n\n def __init__(self, state_size, constants):\n \"\"\"\n The constructor of DQNAgent.\n\n The constructor of DQNAgent which saves constants, sets up neural network graphs, etc.\n\n Parameters:\n state_size (int): The state representation size or length of numpy array\n constants (dict): Loaded constants in dict\n\n \"\"\"\n\n self.C = constants['agent']\n self.memory = []\n self.memory_index = 0\n self.max_memory_size = self.C['max_mem_size']\n self.eps = self.C['epsilon_init']\n self.vanilla = self.C['vanilla']\n self.lr = self.C['learning_rate']\n self.gamma = self.C['gamma']\n self.batch_size = self.C['batch_size']\n self.hidden_size = self.C['dqn_hidden_size']\n\n self.load_weights_file_path = self.C['load_weights_file_path']\n self.save_weights_file_path = self.C['save_weights_file_path']\n\n if self.max_memory_size < self.batch_size:\n raise ValueError('Max memory size must be at least as great as batch size!')\n\n self.state_size = state_size\n self.possible_actions = agent_actions\n self.num_actions = len(self.possible_actions)\n\n self.rule_request_set = rule_requests\n\n self.beh_model = self._build_model()\n self.tar_model = self._build_model()\n\n self._load_weights()\n\n self.reset()\n\n def _build_model(self):\n \"\"\"Builds and returns model/graph of neural network.\"\"\"\n\n model = Sequential()\n model.add(Dense(self.hidden_size, input_dim=self.state_size, activation='relu'))\n model.add(Dense(self.num_actions, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.lr))\n return model\n\n def reset(self):\n \"\"\"Resets the rule-based variables.\"\"\"\n\n self.rule_current_slot_index = 0\n self.rule_phase = 'not done'\n\n def get_action(self, state, use_rule=False):\n \"\"\"\n Returns the action of the agent given a state.\n\n Gets the action of the agent given the current state. Either the rule-based policy or the neural networks are\n used to respond.\n\n Parameters:\n state (numpy.array): The database with format dict(long: dict)\n use_rule (bool): Indicates whether or not to use the rule-based policy, which depends on if this was called\n in warmup or training. 
Default: False\n\n Returns:\n int: The index of the action in the possible actions\n dict: The action/response itself\n\n \"\"\"\n\n if self.eps > random.random():\n index = random.randint(0, self.num_actions - 1)\n action = self._map_index_to_action(index)\n return index, action\n else:\n if use_rule:\n return self._rule_action()\n else:\n return self._dqn_action(state)\n\n def _rule_action(self):\n \"\"\"\n Returns a rule-based policy action.\n\n Selects the next action of a simple rule-based policy.\n\n Returns:\n int: The index of the action in the possible actions\n dict: The action/response itself\n\n \"\"\"\n\n if self.rule_current_slot_index < len(self.rule_request_set):\n slot = self.rule_request_set[self.rule_current_slot_index]\n self.rule_current_slot_index += 1\n rule_response = {'intent': 'request', 'inform_slots': {}, 'request_slots': {slot: 'UNK'}}\n elif self.rule_phase == 'not done':\n rule_response = {'intent': 'match_found', 'inform_slots': {}, 'request_slots': {}}\n self.rule_phase = 'done'\n elif self.rule_phase == 'done':\n rule_response = {'intent': 'done', 'inform_slots': {}, 'request_slots': {}}\n else:\n raise Exception('Should not have reached this clause')\n\n index = self._map_action_to_index(rule_response)\n return index, rule_response\n\n def _map_action_to_index(self, response):\n \"\"\"\n Maps an action to an index from possible actions.\n\n Parameters:\n response (dict)\n\n Returns:\n int\n \"\"\"\n\n for (i, action) in enumerate(self.possible_actions):\n if response == action:\n return i\n raise ValueError('Response: {} not found in possible actions'.format(response))\n\n def _dqn_action(self, state):\n \"\"\"\n Returns a behavior model output given a state.\n\n Parameters:\n state (numpy.array)\n\n Returns:\n int: The index of the action in the possible actions\n dict: The action/response itself\n \"\"\"\n\n index = np.argmax(self._dqn_predict_one(state))\n action = self._map_index_to_action(index)\n return index, action\n\n def _map_index_to_action(self, index):\n \"\"\"\n Maps an index to an action in possible actions.\n\n Parameters:\n index (int)\n\n Returns:\n dict\n \"\"\"\n\n for (i, action) in enumerate(self.possible_actions):\n if index == i:\n return copy.deepcopy(action)\n raise ValueError('Index: {} not in range of possible actions'.format(index))\n\n def _dqn_predict_one(self, state, target=False):\n \"\"\"\n Returns a model prediction given a state.\n\n Parameters:\n state (numpy.array)\n target (bool)\n\n Returns:\n numpy.array\n \"\"\"\n\n return self._dqn_predict(state.reshape(1, self.state_size), target=target).flatten()\n\n def _dqn_predict(self, states, target=False):\n \"\"\"\n Returns a model prediction given an array of states.\n\n Parameters:\n states (numpy.array)\n target (bool)\n\n Returns:\n numpy.array\n \"\"\"\n\n if target:\n return self.tar_model.predict(states)\n else:\n return self.beh_model.predict(states)\n\n def add_experience(self, state, action, reward, next_state, done):\n \"\"\"\n Adds an experience tuple made of the parameters to the memory.\n\n Parameters:\n state (numpy.array)\n action (int)\n reward (int)\n next_state (numpy.array)\n done (bool)\n\n \"\"\"\n\n if len(self.memory) < self.max_memory_size:\n self.memory.append(None)\n self.memory[self.memory_index] = (state, action, reward, next_state, done)\n self.memory_index = (self.memory_index + 1) % self.max_memory_size\n\n def empty_memory(self):\n \"\"\"Empties the memory and resets the memory index.\"\"\"\n\n self.memory = []\n self.memory_index = 0\n\n 
def is_memory_full(self):\n \"\"\"Returns true if the memory is full.\"\"\"\n\n return len(self.memory) == self.max_memory_size\n\n def train(self):\n \"\"\"\n Trains the agent by improving the behavior model given the memory tuples.\n\n Takes batches of memories from the memory pool and processing them. The processing takes the tuples and stacks\n them in the correct format for the neural network and calculates the Bellman equation for Q-Learning.\n\n \"\"\"\n\n # Calc. num of batches to run\n num_batches = len(self.memory) // self.batch_size\n for b in range(num_batches):\n batch = random.sample(self.memory, self.batch_size)\n\n states = np.array([sample[0] for sample in batch])\n next_states = np.array([sample[3] for sample in batch])\n\n assert states.shape == (self.batch_size, self.state_size), 'States Shape: {}'.format(states.shape)\n assert next_states.shape == states.shape\n\n beh_state_preds = self._dqn_predict(states) # For leveling error\n if not self.vanilla:\n beh_next_states_preds = self._dqn_predict(next_states) # For indexing for DDQN\n tar_next_state_preds = self._dqn_predict(next_states, target=True) # For target value for DQN (& DDQN)\n\n inputs = np.zeros((self.batch_size, self.state_size))\n targets = np.zeros((self.batch_size, self.num_actions))\n\n for i, (s, a, r, s_, d) in enumerate(batch):\n t = beh_state_preds[i]\n if not self.vanilla:\n t[a] = r + self.gamma * tar_next_state_preds[i][np.argmax(beh_next_states_preds[i])] * (not d)\n else:\n t[a] = r + self.gamma * np.amax(tar_next_state_preds[i]) * (not d)\n\n inputs[i] = s\n targets[i] = t\n\n self.beh_model.fit(inputs, targets, epochs=1, verbose=0)\n\n def copy(self):\n \"\"\"Copies the behavior model's weights into the target model's weights.\"\"\"\n\n self.tar_model.set_weights(self.beh_model.get_weights())\n\n def save_weights(self):\n \"\"\"Saves the weights of both models in two h5 files.\"\"\"\n\n if not self.save_weights_file_path:\n return\n beh_save_file_path = re.sub(r'\\.h5', r'_beh.h5', self.save_weights_file_path)\n self.beh_model.save_weights(beh_save_file_path)\n tar_save_file_path = re.sub(r'\\.h5', r'_tar.h5', self.save_weights_file_path)\n self.tar_model.save_weights(tar_save_file_path)\n\n def _load_weights(self):\n \"\"\"Loads the weights of both models from two h5 files.\"\"\"\n\n if not self.load_weights_file_path:\n return\n beh_load_file_path = re.sub(r'\\.h5', r'_beh.h5', self.load_weights_file_path)\n self.beh_model.load_weights(beh_load_file_path)\n tar_load_file_path = re.sub(r'\\.h5', r'_tar.h5', self.load_weights_file_path)\n self.tar_model.load_weights(tar_load_file_path)\n" ]
[ [ "numpy.amax", "tensorflow.keras.optimizers.Adam", "numpy.argmax", "numpy.array", "numpy.zeros" ] ]
ishine/AFILM
[ "be8b13ed3f45f4f58cbd37a9fe079d786be398e8" ]
[ "codes/utils.py" ]
[ "import os\nimport numpy as np\nimport h5py\nimport librosa\nimport soundfile as sf\nfrom scipy import interpolate\n\nfrom scipy.signal import decimate\nfrom matplotlib import pyplot as plt\n\n\ndef load_h5(h5_path):\n with h5py.File(h5_path, 'r') as hf:\n print('List of arrays in input file:', list(hf.keys()))\n X = np.array(hf.get('data'))\n Y = np.array(hf.get('label'))\n print('Shape of X:', X.shape)\n print('Shape of Y:', Y.shape)\n\n return X, Y\n\ndef spline_up(x_lr, r):\n x_lr = x_lr.flatten()\n x_hr_len = len(x_lr) * r\n x_sp = np.zeros(x_hr_len)\n\n i_lr = np.arange(x_hr_len, step=r)\n i_hr = np.arange(x_hr_len)\n\n f = interpolate.splrep(i_lr, x_lr)\n\n x_sp = interpolate.splev(i_hr, f)\n\n return x_sp\n\ndef upsample_wav(wav, args, model, save_spectrum=False):\n # load signal\n x_hr, fs = librosa.load(wav, sr=args.sr)\n x_lr_t = decimate(x_hr, args.r)\n # pad to mutliple of patch size to ensure model runs over entire sample\n x_hr = np.pad(x_hr, (0, args.patch_size - (x_hr.shape[0] % args.patch_size)), 'constant', constant_values=(0,0))\n # downscale signal\n x_lr = decimate(x_hr, args.r)\n\n # upscale the low-res version\n x_lr = x_lr.reshape((1,len(x_lr),1))\n\n # preprocessing\n assert len(x_lr) == 1\n x_sp = spline_up(x_lr, args.r)\n x_sp = x_sp[:len(x_sp) - (len(x_sp) % (2**(args.layers+1)))]\n x_sp = x_sp.reshape((1,len(x_sp),1))\n x_sp = x_sp.reshape((int(x_sp.shape[1]/args.patch_size), args.patch_size,1))\n\n # prediction\n pred = model.predict(x_sp, batch_size=16)\n x_pr = pred.flatten()\n\n # crop so that it works with scaling ratio\n x_hr = x_hr[:len(x_pr)]\n x_lr_t = x_lr_t[:len(x_pr)]\n\n # save the file\n outname = wav # + '.' + args.out_label\n sf.write(outname + '.lr.wav', x_lr_t, int(fs / args.r))\n sf.write(outname + '.hr.wav', x_hr, fs)\n sf.write(outname + '.pr.wav', x_pr, fs)\n\n if save_spectrum:\n # save the spectrum\n S = get_spectrum(x_pr, n_fft=2048)\n save_spectrum(S, outfile=outname + '.pr.png')\n S = get_spectrum(x_hr, n_fft=2048)\n save_spectrum(S, outfile=outname + '.hr.png')\n S = get_spectrum(x_lr, n_fft=int(2048/args.r))\n save_spectrum(S, outfile=outname + '.lr.png')\n\ndef get_spectrum(x, n_fft=2048):\n S = librosa.stft(x, n_fft)\n p = np.angle(S)\n S = np.log1p(np.abs(S))\n return S\n\ndef save_spectrum(S, lim=800, outfile='spectrogram.png'):\n plt.imshow(S.T, aspect=10)\n # plt.xlim([0,lim])\n plt.tight_layout()\n plt.savefig(outfile)" ]
[ [ "scipy.interpolate.splrep", "matplotlib.pyplot.imshow", "matplotlib.pyplot.tight_layout", "numpy.pad", "numpy.abs", "scipy.signal.decimate", "numpy.arange", "matplotlib.pyplot.savefig", "scipy.interpolate.splev", "numpy.angle", "numpy.zeros" ] ]
Jodainc/Method-Numerics-24.9-Point
[ "f99ebc3eae6ee5b2ef35219c3fc3aaf333a4572d" ]
[ "methodnumeric.py" ]
[ "import numpy as npp\nfrom matplotlib import pyplot as plt\nfrom numpy import array\n\n\nx_data = npp.array([87.8, 96.6, 176, 263, 351, 571,834,1129,1624,2107,2678,3380,4258])\ny_data = npp.array([153,204,255,306,357,408,459,510,561,612,663,714,765])\n\ns=[87.8, 96.6, 176, 263, 351, 571,834,1129,1624,2107,2678,3380,4258]\n#s1= s*1e+3;\ne=[153,204,255,306,357,408,459,510,561,612,663,714,765]\n\nse = []\nee=[]\nsr=[0,0,0,0,0,0,0,0,0]\ndesder=[0,0,0,0,0,0,0,0,0]\nfor i in s:\n se.append(i * 1e+3)\n \n\n \nfor i in e:\n ee.append(i * 1e-3)\n \n \n \nidx=5\n\nnp=len(s)-idx\nfor i in range(0,np):\n sr[i]=s[idx+i]\nde=51e-3;\ndde=2*de;\nfor i in range(0,np-1):\n if i>0 :\n desder[i]= (sr[i+1]-sr[i-1])/dde\n else:\n desder[0]= ((-sr[2])+4*sr[1]-3*sr[0])/dde\n \ndesder[np]= (3*sr[np-1]-(4*sr[np-2])+sr[np-3])/dde\ndesder[np-1]= (3*sr[np-1]-(4*sr[np-2])+sr[np-3])/dde\nprint(3*sr[np-1] )\nprint(-4*sr[np-2] )\nprint(sr[np-3] )\nprint(dde )\nprint(desder )\nx_data = array( sr)\ny_data =array( desder )\n\nc1 = npp.polyfit(x_data,y_data,1)\na= c1[0]\neo=c1[1]\nsp=[0,0,0,0,0,0] \ndsde1=npp.polyval(c1,sp)\n#plt.plot(sp,dsde1,sr,desder,'-')\nep = [0 for x in range(163)]\nfor i in range(1,161): \n if i==0:\n ep[0]= 0.00000 \n else:\n ep[i]= ep[i-1] + 0.00500\n\nsp=(eo/a)*(npp.exp(a*ep)-1)\n#punto b\n#plt.plot(ep,sp,e,s,'*')\nsStart=s[10];\neStart=e[10];\nsBar=sStart/(npp.exp(a*eStart)-1)\nsp2=sBar*(npp.exp(a*ep)-1)\n#punto C\nplt.plot(ep,sp2,e,s,'*')\n" ]
[ [ "numpy.polyfit", "matplotlib.pyplot.plot", "numpy.exp", "numpy.array", "numpy.polyval" ] ]
Niram7777/tensorflow
[ "0e40b3e0c30caff9427c1da54c40b6236608ec15" ]
[ "tensorflow/python/ops/array_ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Tests for this file live in python/kernel_tests/array_ops_test.py\n\"\"\"Support for manipulating tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\nfrom tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import\nfrom tensorflow.python.types import core\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.tf_export import tf_export\n# pylint: enable=wildcard-import\n\n# Used for slicing to specify a new 1 size dimension\nnewaxis = None\ntf_export(\"newaxis\").export_constant(__name__, \"newaxis\")\n\n# We override the 'slice' for the \"slice\" op, so we keep Python's\n# existing 'slice' for later use in this module.\n_BaseSlice = slice\n\n\n@tf_export(\"reshape\", v1=[\"reshape\", \"manip.reshape\"])\[email protected]_dispatch_support\ndef reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name\n r\"\"\"Reshapes a tensor.\n\n Given `tensor`, this operation returns a new `tf.Tensor` that has the same\n values as `tensor` in the same order, except with a new shape given by\n `shape`.\n\n >>> t1 = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> print(tf.shape(t1).numpy())\n [2 3]\n >>> t2 = tf.reshape(t1, [6])\n >>> t2\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t2, [3, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n The `tf.reshape` does not change the order of or the total number of elements\n in the tensor, and so it can reuse the underlying data buffer. 
This makes it\n a fast operation independent of how big of a tensor it is operating on.\n\n >>> tf.reshape([1, 2, 3], [2, 2])\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Input to reshape is a tensor with 3 values, but the\n requested shape has 4\n\n To instead reorder the data to rearrange the dimensions of a tensor, see\n `tf.transpose`.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [3, 2]).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n >>> tf.transpose(t, perm=[1, 0]).numpy()\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n\n If one component of `shape` is the special value -1, the size of that\n dimension is computed so that the total size remains constant. In particular,\n a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can\n be -1.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t, [3, -1])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n >>> tf.reshape(t, [-1, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.\n\n >>> tf.reshape([7], []).numpy()\n 7\n\n More examples:\n\n >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> print(tf.shape(t).numpy())\n [9]\n >>> tf.reshape(t, [3, 3])\n <tf.Tensor: shape=(3, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=int32)>\n\n >>> t = [[[1, 1], [2, 2]],\n ... [[3, 3], [4, 4]]]\n >>> print(tf.shape(t).numpy())\n [2 2 2]\n >>> tf.reshape(t, [2, 4])\n <tf.Tensor: shape=(2, 4), dtype=int32, numpy=\n array([[1, 1, 2, 2],\n [3, 3, 4, 4]], dtype=int32)>\n\n >>> t = [[[1, 1, 1],\n ... [2, 2, 2]],\n ... [[3, 3, 3],\n ... [4, 4, 4]],\n ... [[5, 5, 5],\n ... [6, 6, 6]]]\n >>> print(tf.shape(t).numpy())\n [3 2 3]\n >>> # Pass '[-1]' to flatten 't'.\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(18,), dtype=int32,\n numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],\n dtype=int32)>\n >>> # -- Using -1 to infer the shape --\n >>> # Here -1 is inferred to be 9:\n >>> tf.reshape(t, [2, -1])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 2:\n >>> tf.reshape(t, [-1, 9])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 3:\n >>> tf.reshape(t, [ 2, -1, 3])\n <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=\n array([[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]], dtype=int32)>\n\n Args:\n tensor: A `Tensor`.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Defines the shape of the output tensor.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n \"\"\"\n result = gen_array_ops.reshape(tensor, shape, name)\n tensor_util.maybe_set_static_shape(result, shape)\n return result\n\n\n@tf_export(\"fill\")\[email protected]_dispatch_support\ndef fill(dims, value, name=None):\n r\"\"\"Creates a tensor filled with a scalar value.\n\n See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`.\n\n This operation creates a tensor of shape `dims` and fills it with `value`.\n\n For example:\n\n >>> tf.fill([2, 3], 9)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[9, 9, 9],\n [9, 9, 9]], dtype=int32)>\n\n `tf.fill` evaluates at graph runtime and supports dynamic shapes based on\n other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which\n embeds the value as a `Const` node.\n\n Args:\n dims: A 1-D sequence of non-negative numbers. Represents the shape of the\n output `tf.Tensor`. Entries should be of type: `int32`, `int64`.\n value: A value to fill the returned `tf.Tensor`.\n name: Optional string. The name of the output `tf.Tensor`.\n\n Returns:\n A `tf.Tensor` with shape `dims` and the same dtype as `value`.\n\n Raises:\n InvalidArgumentError: `dims` contains negative entries.\n NotFoundError: `dims` contains non-integer entries.\n\n @compatibility(numpy)\n Similar to `np.full`. In `numpy`, more parameters are supported. Passing a\n number argument as the shape (`np.full(5, value)`) is valid in `numpy` for\n specifying a 1-D shaped result, while TensorFlow does not support this syntax.\n @end_compatibility\n \"\"\"\n result = gen_array_ops.fill(dims, value, name=name)\n tensor_util.maybe_set_static_shape(result, dims)\n return result\n\n\n@tf_export(\"identity\")\[email protected]_dispatch_support\ndef identity(input, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Return a Tensor with the same shape and contents as input.\n\n The return value is not the same Tensor as the original, but contains the same\n values. This operation is fast when used on the same device.\n\n For example:\n\n >>> a = tf.constant([0.78])\n >>> a_identity = tf.identity(a)\n >>> a.numpy()\n array([0.78], dtype=float32)\n >>> a_identity.numpy()\n array([0.78], dtype=float32)\n\n Calling `tf.identity` on a variable will make a Tensor that represents the\n value of that variable at the time it is called. This is equivalent to calling\n `<variable>.read_value()`.\n\n >>> a = tf.Variable(5)\n >>> a_identity = tf.identity(a)\n >>> a.assign_add(1)\n <tf.Variable ... shape=() dtype=int32, numpy=6>\n >>> a.numpy()\n 6\n >>> a_identity.numpy()\n 5\n\n Args:\n input: A `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n if isinstance(input, composite_tensor.CompositeTensor):\n return nest.map_structure(identity, input, expand_composites=True)\n if context.executing_eagerly() and not hasattr(input, \"graph\"):\n # Make sure we get an input with handle data attached from resource\n # variables. 
Variables have correct handle data when graph building.\n input = ops.convert_to_tensor(input)\n ret = gen_array_ops.identity(input, name=name)\n # Propagate handle data for happier shape inference for resource variables.\n if hasattr(input, \"_handle_data\"):\n ret._handle_data = input._handle_data # pylint: disable=protected-access\n return ret\n\n\n# pylint: disable=redefined-builtin,protected-access\n@tf_export(v1=[\"expand_dims\"])\[email protected]_dispatch_support\[email protected]_args(None, \"Use the `axis` argument instead\", \"dim\")\ndef expand_dims(input, axis=None, name=None, dim=None):\n \"\"\"Returns a tensor with a length 1 axis inserted at index `axis`.\n\n Given a tensor `input`, this operation inserts a dimension of length 1 at the\n dimension index `axis` of `input`'s shape. The dimension index follows Python\n indexing rules: It's zero-based, a negative index it is counted backward\n from the end.\n\n This operation is useful to:\n\n * Add an outer \"batch\" dimension to a single element.\n * Align axes for broadcasting.\n * To add an inner vector length axis to a tensor of scalars.\n\n For example:\n\n If you have a single image of shape `[height, width, channels]`:\n\n >>> image = tf.zeros([10,10,3])\n\n You can add an outer `batch` axis by passing `axis=0`:\n\n >>> tf.expand_dims(image, axis=0).shape.as_list()\n [1, 10, 10, 3]\n\n The new axis location matches Python `list.insert(axis, 1)`:\n\n >>> tf.expand_dims(image, axis=1).shape.as_list()\n [10, 1, 10, 3]\n\n Following standard Python indexing rules, a negative `axis` counts from the\n end so `axis=-1` adds an inner most dimension:\n\n >>> tf.expand_dims(image, -1).shape.as_list()\n [10, 10, 3, 1]\n\n This operation requires that `axis` is a valid index for `input.shape`,\n following Python indexing rules:\n\n ```\n -1-tf.rank(input) <= axis <= tf.rank(input)\n ```\n\n This operation is related to:\n\n * `tf.squeeze`, which removes dimensions of size 1.\n * `tf.reshape`, which provides more flexible reshaping capability.\n * `tf.sparse.expand_dims`, which provides this functionality for\n `tf.SparseTensor`\n\n Args:\n input: A `Tensor`.\n axis: 0-D (scalar). Specifies the dimension index at which to expand the\n shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.\n name: The name of the output `Tensor` (optional).\n dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.\n\n Returns:\n A `Tensor` with the same data as `input`, but its shape has an additional\n dimension of size 1 added.\n\n Raises:\n ValueError: if either both or neither of `dim` and `axis` are specified.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n raise ValueError(\"Must specify an axis argument to tf.expand_dims()\")\n return expand_dims_v2(input, axis, name)\n\n\n@tf_export(\"expand_dims\", v1=[])\[email protected]_dispatch_support\ndef expand_dims_v2(input, axis, name=None):\n \"\"\"Returns a tensor with a length 1 axis inserted at index `axis`.\n\n Given a tensor `input`, this operation inserts a dimension of length 1 at the\n dimension index `axis` of `input`'s shape. 
The dimension index follows Python\n indexing rules: It's zero-based, a negative index it is counted backward\n from the end.\n\n This operation is useful to:\n\n * Add an outer \"batch\" dimension to a single element.\n * Align axes for broadcasting.\n * To add an inner vector length axis to a tensor of scalars.\n\n For example:\n\n If you have a single image of shape `[height, width, channels]`:\n\n >>> image = tf.zeros([10,10,3])\n\n You can add an outer `batch` axis by passing `axis=0`:\n\n >>> tf.expand_dims(image, axis=0).shape.as_list()\n [1, 10, 10, 3]\n\n The new axis location matches Python `list.insert(axis, 1)`:\n\n >>> tf.expand_dims(image, axis=1).shape.as_list()\n [10, 1, 10, 3]\n\n Following standard Python indexing rules, a negative `axis` counts from the\n end so `axis=-1` adds an inner most dimension:\n\n >>> tf.expand_dims(image, -1).shape.as_list()\n [10, 10, 3, 1]\n\n This operation requires that `axis` is a valid index for `input.shape`,\n following Python indexing rules:\n\n ```\n -1-tf.rank(input) <= axis <= tf.rank(input)\n ```\n\n This operation is related to:\n\n * `tf.squeeze`, which removes dimensions of size 1.\n * `tf.reshape`, which provides more flexible reshaping capability.\n * `tf.sparse.expand_dims`, which provides this functionality for\n `tf.SparseTensor`\n\n Args:\n input: A `Tensor`.\n axis: Integer specifying the dimension index at which to expand the\n shape of `input`. Given an input of D dimensions, `axis` must be in range\n `[-(D+1), D]` (inclusive).\n name: Optional string. The name of the output `Tensor`.\n\n Returns:\n A tensor with the same data as `input`, with an additional dimension\n inserted at the index specified by `axis`.\n\n Raises:\n ValueError: If `axis` is not specified.\n InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.\n \"\"\"\n return gen_array_ops.expand_dims(input, axis, name)\n\n\n# pylint: enable=redefined-builtin,protected-access\n\n\n# Aliases for some automatically-generated names.\n# pylint: disable=protected-access\[email protected](\"2016-11-30\",\n \"This op will be removed after the deprecation date. \"\n \"Please switch to tf.setdiff1d().\")\ndef listdiff(x, y, out_idx=None, name=None):\n return gen_array_ops.list_diff(x, y, out_idx, name)\n\n\nlistdiff.__doc__ = gen_array_ops.list_diff.__doc__ + \"\\n\" + listdiff.__doc__\n\n# pylint: enable=protected-access\n\n\n# pylint: disable=undefined-variable\[email protected](\"2018-11-30\",\n \"This op will be removed after the deprecation date. \"\n \"Please switch to tf.sets.difference().\")\n@tf_export(v1=[\"setdiff1d\"])\[email protected]_dispatch_support\ndef setdiff1d(x, y, index_dtype=dtypes.int32, name=None):\n \"\"\"Computes the difference between two lists of numbers or strings.\n\n Given a list x and a list y, this operation returns a list out that\n represents all values that are in x but not in y. The returned list\n out is sorted in the same order that the numbers appear in x\n (duplicates are preserved). This operation also returns a list idx\n that represents the position of each out element in x.\n\n In other words:\n\n ```python\n out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]\n ```\n\n Example usage:\n\n >>> x = [1, 2, 3, 4, 5, 6]\n >>> y = [1, 3, 5]\n >>> setdiff1d(x,y)\n ListDiff(out=<tf.Tensor: id=2, shape=(3,), dtype=int32,\n numpy=array([2, 4, 6], dtype=int32)>, idx=<tf.Tensor: id=3,\n shape=(3,), dtype=int32, numpy=array([1, 3, 5], dtype=int32)>)\n\n Args:\n x: A Tensor. 1-D. Values to keep.\n y: A Tensor. 
Must have the same type as x. 1-D. Values to remove.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (out, idx).\n out: A Tensor. Has the same type as x.\n idx: A Tensor of type out_idx.\n \"\"\"\n return gen_array_ops.list_diff(x, y, index_dtype, name)\n\n\nsetdiff1d.__doc__ = gen_array_ops.list_diff.__doc__\n\n\n@tf_export(\"broadcast_dynamic_shape\")\[email protected]_dispatch_support\ndef broadcast_dynamic_shape(shape_x, shape_y):\n \"\"\"Computes the shape of a broadcast given symbolic shapes.\n\n When shape_x and shape_y are Tensors representing shapes (i.e. the result of\n calling tf.shape on another Tensor) this computes a Tensor which is the shape\n of the result of a broadcasting op applied in tensors of shapes shape_x and\n shape_y.\n\n For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a\n Tensor whose value is [5, 2, 3].\n\n This is useful when validating the result of a broadcasting operation when the\n tensors do not have statically known shapes.\n\n Args:\n shape_x: A rank 1 integer `Tensor`, representing the shape of x.\n shape_y: A rank 1 integer `Tensor`, representing the shape of y.\n\n Returns:\n A rank 1 integer `Tensor` representing the broadcasted shape.\n \"\"\"\n return gen_array_ops.broadcast_args(shape_x, shape_y)\n\n\n@tf_export(\"broadcast_static_shape\")\[email protected]_dispatch_support\ndef broadcast_static_shape(shape_x, shape_y):\n \"\"\"Computes the shape of a broadcast given known shapes.\n\n When shape_x and shape_y are fully known TensorShapes this computes a\n TensorShape which is the shape of the result of a broadcasting op applied in\n tensors of shapes shape_x and shape_y.\n\n For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a\n TensorShape whose value is [5, 2, 3].\n\n This is useful when validating the result of a broadcasting operation when the\n tensors have statically known shapes.\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n \"\"\"\n return common_shapes.broadcast_shape(shape_x, shape_y)\n\n\n@tf_export(\"shape\", v1=[])\[email protected]_dispatch_support\ndef shape_v2(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n \n See also `tf.size`, `tf.rank`.\n\n `tf.shape` returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.shape(t)\n <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>\n\n Note: When using symbolic tensors, such as when using the Keras API,\n tf.shape() will return the shape of the symbolic tensor.\n\n >>> a = tf.keras.layers.Input((None, 10))\n >>> tf.shape(a)\n <tf.Tensor ... shape=(3,) dtype=int32>\n\n In these cases, using `tf.Tensor.shape` will return more informative results.\n\n >>> a.shape\n TensorShape([None, None, 10])\n \n (The first `None` represents the as yet unknown batch size.)\n\n `tf.shape` and `Tensor.shape` should be identical in eager mode. Within\n `tf.function` or within a `compat.v1` context, not all dimensions may be\n known until execution time. 
Hence when defining custom layers and models\n for graph mode, prefer the dynamic `tf.shape(x)` over the static `x.shape`.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n \"\"\"\n return shape(input, name, out_type)\n\n\n@tf_export(v1=[\"shape\"])\[email protected]_dispatch_support\ndef shape(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n This operation returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.shape(t) # [2, 2, 3]\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified output type of the operation (`int32`\n or `int64`). Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`.\n \"\"\"\n return shape_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the shape as a constant when possible.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). Defaults to tf.int32.\n\n Returns:\n A `Tensor` of type `out_type`.\n\n \"\"\"\n with ops.name_scope(name, \"Shape\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_math_ops.cast(input.dense_shape, out_type)\n else:\n if not context.executing_eagerly():\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize and input_shape.is_fully_defined():\n return constant(input_shape.as_list(), out_type, name=name)\n return gen_array_ops.shape(input, name=name, out_type=out_type)\n\n\n@tf_export(\"shape_n\")\[email protected]_dispatch_support\ndef shape_n(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns shape of tensors.\n\n Args:\n input: A list of at least 1 `Tensor` object with the same type.\n out_type: The specified output type of the operation (`int32` or `int64`).\n Defaults to `tf.int32`(optional).\n name: A name for the operation (optional).\n\n Returns:\n A list with the same length as `input` of `Tensor` objects with\n type `out_type`.\n \"\"\"\n\n return gen_array_ops.shape_n(input, out_type=out_type, name=name)\n\n\n@tf_export(\"size\", v1=[])\[email protected]_dispatch_support\ndef size_v2(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the size of a tensor.\n \n See also `tf.shape`.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.size(t)\n <tf.Tensor: shape=(), dtype=int32, numpy=12>\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. 
Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n \"\"\"\n\n return size(input, name, out_type)\n\n\n@tf_export(v1=[\"size\"])\[email protected]_dispatch_support\ndef size(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the size of a tensor.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.size(t) # 12\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n \"\"\"\n return size_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin,protected-access\n \"\"\"Returns the size of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the size as a constant when possible.\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n \"\"\"\n if (context.executing_eagerly() and not hasattr(input, \"graph\") and\n not isinstance(\n input,\n (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):\n input = ops.convert_to_tensor(input)\n np_out_type = out_type.as_numpy_dtype\n num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access\n return ops.convert_to_tensor(num_elements, dtype=out_type)\n with ops.name_scope(name, \"Size\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_math_ops.prod(\n gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize:\n if input_shape.is_fully_defined():\n return constant(input_shape.num_elements(), out_type, name=name)\n if input_shape.dims and any(dim == 0 for dim in input_shape.dims):\n return constant(0, out_type, name=name)\n return gen_array_ops.size(input, name=name, out_type=out_type)\n\n\n@tf_export(\"rank\")\[email protected]_dispatch_support\ndef rank(input, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n For example:\n\n ```python\n # shape of tensor 't' is [2, 2, 3]\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.rank(t) # 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. 
Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n\n @compatibility(numpy)\n Equivalent to np.ndim\n @end_compatibility\n \"\"\"\n return rank_internal(input, name, optimize=True)\n\n\ndef rank_internal(input, name=None, optimize=True):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the rank as a constant when possible.\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n with ops.name_scope(name, \"Rank\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_array_ops.size(input.dense_shape, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize and input_shape.ndims is not None:\n return constant(input_shape.ndims, dtypes.int32, name=name)\n return gen_array_ops.rank(input, name=name)\n\n\n_SLICE_TYPE_ERROR = (\n \"Only integers, slices (`:`), ellipsis (`...`), \"\n \"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid \"\n \"indices\")\n\n_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,\n dtypes.int64_ref)\n\n\ndef _check_index(idx):\n \"\"\"Check if a given value is a valid index into a tensor.\"\"\"\n if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):\n return\n\n # Optimistic check. Assumptions:\n # * any object with a dtype is supported\n # * any object with a dtype has a sizeable shape attribute.\n dtype = getattr(idx, \"dtype\", None)\n if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or\n idx.shape and len(idx.shape) == 1):\n # TODO(slebedev): IndexError seems more appropriate here, but it\n # will break `_slice_helper` contract.\n raise TypeError(_SLICE_TYPE_ERROR + \", got {!r}\".format(idx))\n\n\ndef _is_undefined_dimension(d):\n return isinstance(d, tensor_shape.Dimension) and d.value is None\n\n\n@tf_export(\"__operators__.getitem\", v1=[])\[email protected]_dispatch_support\ndef _slice_helper(tensor, slice_spec, var=None):\n \"\"\"Overload for Tensor.__getitem__.\n\n This operation extracts the specified region from the tensor.\n The notation is similar to NumPy with the restriction that\n currently only support basic indexing. 
That means that\n using a non-scalar tensor as input is not currently allowed.\n\n Some useful examples:\n\n ```python\n # Strip leading and trailing 2 elements\n foo = tf.constant([1,2,3,4,5,6])\n print(foo[2:-2].eval()) # => [3,4]\n\n # Skip every other row and reverse the order of the columns\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]\n\n # Use scalar tensors as indices on both dimensions\n print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3\n\n # Insert another dimension\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]\n print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],\n [[7],[8],[9]]]\n\n # Ellipses (3 equivalent operations)\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n\n # Masks\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]\n ```\n\n Notes:\n - `tf.newaxis` is `None` as in NumPy.\n - An implicit ellipsis is placed at the end of the `slice_spec`\n - NumPy advanced indexing is currently not supported.\n\n Purpose in the API:\n\n This method is exposed in TensorFlow's API so that library developers\n can register dispatching for `Tensor.__getitem__` to allow it to handle\n custom composite tensors & other custom objects.\n\n The API symbol is not intended to be called by users directly and does\n appear in TensorFlow's generated documentation.\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n var: In the case of variable slice assignment, the Variable object to slice\n (i.e. 
tensor is the read-only view of this variable).\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, ellipsis,\n tf.newaxis or scalar int32/int64 tensors.\n \"\"\"\n if isinstance(slice_spec, bool) or \\\n (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \\\n (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):\n return boolean_mask(tensor=tensor, mask=slice_spec)\n\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n\n begin, end, strides = [], [], []\n index = 0\n\n new_axis_mask, shrink_axis_mask = 0, 0\n begin_mask, end_mask = 0, 0\n ellipsis_mask = 0\n for s in slice_spec:\n if isinstance(s, _BaseSlice):\n if s.start is not None and not _is_undefined_dimension(s.start):\n _check_index(s.start)\n begin.append(s.start)\n else:\n begin.append(0)\n begin_mask |= (1 << index)\n if s.stop is not None and not _is_undefined_dimension(s.stop):\n _check_index(s.stop)\n end.append(s.stop)\n else:\n end.append(0)\n end_mask |= (1 << index)\n if s.step is not None and not _is_undefined_dimension(s.step):\n _check_index(s.step)\n strides.append(s.step)\n else:\n strides.append(1)\n elif s is Ellipsis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n ellipsis_mask |= (1 << index)\n elif s is newaxis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n new_axis_mask |= (1 << index)\n else:\n _check_index(s)\n begin.append(s)\n end.append(s + 1)\n strides.append(1)\n shrink_axis_mask |= (1 << index)\n index += 1\n\n # stack possibly involves no tensors, so we must use op_scope correct graph.\n with ops.name_scope(\n None,\n \"strided_slice\", [tensor] + begin + end + strides,\n skip_on_eager=False) as name:\n if begin:\n packed_begin, packed_end, packed_strides = (stack(begin), stack(end),\n stack(strides))\n if (packed_begin.dtype == dtypes.int64 or\n packed_end.dtype == dtypes.int64 or\n packed_strides.dtype == dtypes.int64):\n if packed_begin.dtype != dtypes.int64:\n packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)\n if packed_end.dtype != dtypes.int64:\n packed_end = gen_math_ops.cast(packed_end, dtypes.int64)\n if packed_strides.dtype != dtypes.int64:\n packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)\n else:\n var_empty = constant([], dtype=dtypes.int32)\n packed_begin = packed_end = packed_strides = var_empty\n return strided_slice(\n tensor,\n packed_begin,\n packed_end,\n packed_strides,\n begin_mask=begin_mask,\n end_mask=end_mask,\n shrink_axis_mask=shrink_axis_mask,\n new_axis_mask=new_axis_mask,\n ellipsis_mask=ellipsis_mask,\n var=var,\n name=name)\n\n\n# pylint: disable=undefined-variable,protected-access,redefined-outer-name\n@tf_export(\"slice\")\[email protected]_dispatch_support\ndef slice(input_, begin, size, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Extracts a slice from a tensor.\n\n See also `tf.strided_slice`.\n\n This operation extracts a slice of size `size` from a tensor `input_` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input_` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input_`. 
In other\n words, `begin[i]` is the offset into the i'th dimension of `input_` that you\n want to slice from.\n\n Note that `tf.Tensor.__getitem__` is typically a more pythonic way to\n perform slices, as it allows you to write `foo[3:7, :-2]` instead of\n `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. In other words, this is equivalent to setting:\n\n `size[i] = input_.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]\n tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],\n # [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input_`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\n# pylint: disable=invalid-name\n@tf_export(\"strided_slice\")\[email protected]_dispatch_support\ndef strided_slice(input_,\n begin,\n end,\n strides=None,\n begin_mask=0,\n end_mask=0,\n ellipsis_mask=0,\n new_axis_mask=0,\n shrink_axis_mask=0,\n var=None,\n name=None):\n \"\"\"Extracts a strided slice of a tensor (generalized Python array indexing).\n\n See also `tf.slice`.\n\n **Instead of calling this op directly most users will want to use the\n NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which\n is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**\n The interface of this op is a low-level encoding of the slicing syntax.\n\n Roughly speaking, this op extracts a slice of size `(end-begin)/stride`\n from the given `input_` tensor. Starting at the location specified by `begin`\n the slice continues by adding `stride` to the index until all dimensions are\n not less than `end`.\n Note that a stride can be negative, which causes a reverse slice.\n\n Given a Python slice `input[spec0, spec1, ..., specn]`,\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be vectors of length n.\n n in general is not equal to the rank of the `input_` tensor.\n\n In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,\n `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to\n the ith spec.\n\n If the ith bit of `begin_mask` is set, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions\n as needed will be inserted between other dimensions. 
Only one\n non-zero bit is allowed in `ellipsis_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is set, then `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example,\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\n\n If the ith bit of `shrink_axis_mask` is set, it implies that the ith\n specification shrinks the dimensionality by 1, taking on the value at index\n `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in\n Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`\n equal to 2.\n\n\n NOTE: `begin` and `end` are zero-indexed.\n `strides` entries must be non-zero.\n\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]\n tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],\n # [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipsis_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n var: The variable corresponding to `input_` or None\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n\n if strides is None:\n strides = ones_like(begin)\n\n op = gen_array_ops.strided_slice(\n input=input_,\n begin=begin,\n end=end,\n strides=strides,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n parent_name = name\n\n if not (var is None and isinstance(op, ops.EagerTensor)):\n\n def assign(val, name=None):\n \"\"\"Closure that holds all the arguments to create an assignment.\"\"\"\n\n if var is None:\n raise ValueError(\"Sliced assignment is only supported for variables\")\n else:\n if name is None:\n name = parent_name + \"_assign\"\n\n return var._strided_slice_assign(\n begin=begin,\n end=end,\n strides=strides,\n value=val,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n op.assign = assign\n return op\n\n\ndef _SliceHelperVar(var, slice_spec):\n \"\"\"Creates a slice helper object given a variable.\n\n This allows creating a sub-tensor from part of the current contents\n of a variable. See `tf.Tensor.__getitem__` for detailed examples\n of slicing.\n\n This function in addition also allows assignment to a sliced range.\n This is similar to `__setitem__` functionality in Python. However,\n the syntax is different so that the user can capture the assignment\n operation for grouping or passing to `sess.run()`.\n For example,\n\n ```python\n import tensorflow as tf\n A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]\n\n op = A[:2,:2].assign(22. 
* tf.ones((2, 2)))\n print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]\n ```\n\n Note that assignments currently do not support NumPy broadcasting\n semantics.\n\n Args:\n var: An `ops.Variable` object.\n slice_spec: The arguments to `Tensor.__getitem__`.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n As an operator. The operator also has a `assign()` method\n that can be used to generate an assignment operator.\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: TypeError: If the slice indices aren't int, slice,\n ellipsis, tf.newaxis or int32/int64 tensors.\n\n \"\"\"\n\n return _slice_helper(var.value(), slice_spec, var)\n\n\nops.Tensor._override_operator(\"__getitem__\", _slice_helper)\n\n\n@tf_export(\"parallel_stack\")\[email protected]_dispatch_support\ndef parallel_stack(values, name=\"parallel_stack\"):\n \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.\n\n Requires that the shape of inputs be known at graph construction time.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the first dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`\n tensor will have the shape `(N, A, B, C)`.\n\n For example:\n\n ```python\n x = tf.constant([1, 4])\n y = tf.constant([2, 5])\n z = tf.constant([3, 6])\n tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]\n ```\n\n The difference between `stack` and `parallel_stack` is that `stack` requires\n all the inputs be computed before the operation will begin but doesn't require\n that the input shapes be known during graph construction.\n\n `parallel_stack` will copy pieces of the input into the output as they become\n available, in some situations this can provide a performance benefit.\n\n Unlike `stack`, `parallel_stack` does NOT support backpropagation.\n\n This is the opposite of unstack. 
The numpy equivalent is\n\n tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n \"\"\"\n with ops.name_scope(name):\n value_t = ops.convert_to_tensor(values[0])\n value_shape = ops.convert_to_tensor(value_t).get_shape()\n\n output_shape = tensor_shape.TensorShape([len(values)])\n output_shape = output_shape.concatenate(value_shape)\n # expand_dims converts concat to stack.\n return gen_array_ops.parallel_concat(\n [expand_dims(value, 0) for value in values], shape=output_shape)\n\n\n@tf_export(\"stack\")\[email protected]_dispatch_support\ndef stack(values, axis=0, name=\"stack\"):\n \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n See also `tf.concat`, `tf.tile`, `tf.repeat`.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the `axis` dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`;\n\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\n Etc.\n\n For example:\n\n >>> x = tf.constant([1, 4])\n >>> y = tf.constant([2, 5])\n >>> z = tf.constant([3, 6])\n >>> tf.stack([x, y, z])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n >>> tf.stack([x, y, z], axis=1)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n\n This is the opposite of unstack. The numpy equivalent is `np.stack`\n\n >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))\n True\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n axis: An `int`. The axis to stack along. 
Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-(R+1), R+1)`.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n ValueError: If `axis` is out of the range [-(R+1), R+1).\n \"\"\"\n if axis == 0:\n try:\n # If the input is a constant list, it can be converted to a constant op\n return ops.convert_to_tensor(values, name=name)\n except (TypeError, ValueError):\n pass # Input list contains non-constant tensors\n\n value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access\n if value_shape is not None:\n expanded_num_dims = len(value_shape) + 1\n if axis < -expanded_num_dims or axis >= expanded_num_dims:\n raise ValueError(\"axis = %d not in [%d, %d)\" %\n (axis, -expanded_num_dims, expanded_num_dims))\n\n return gen_array_ops.pack(values, axis=axis, name=name)\n\n\n# pylint: disable=invalid-name\ndef _autopacking_helper(list_or_tuple, dtype, name):\n \"\"\"Converts the given list or tuple to a tensor by packing.\n\n Args:\n list_or_tuple: A (possibly nested) list or tuple containing a tensor.\n dtype: The element type of the returned tensor.\n name: A name for the returned tensor.\n\n Returns:\n A `tf.Tensor` with value equivalent to `list_or_tuple`.\n \"\"\"\n if context.executing_eagerly():\n # NOTE: Fast path when all the items are tensors, this doesn't do any type\n # checking.\n if all(isinstance(elem, core.Tensor) for elem in list_or_tuple):\n return gen_array_ops.pack(list_or_tuple, name=name)\n must_pack = False\n converted_elems = []\n with ops.name_scope(name) as scope:\n for i, elem in enumerate(list_or_tuple):\n if isinstance(elem, core.Tensor):\n if dtype is not None and elem.dtype.base_dtype != dtype:\n raise TypeError(\"Cannot convert a list containing a tensor of dtype \"\n \"%s to %s (Tensor is: %r)\" %\n (elem.dtype, dtype, elem))\n converted_elems.append(elem)\n must_pack = True\n elif isinstance(elem, (list, tuple)):\n converted_elem = _autopacking_helper(elem, dtype, str(i))\n if isinstance(converted_elem, core.Tensor):\n must_pack = True\n converted_elems.append(converted_elem)\n else:\n converted_elems.append(elem)\n if must_pack:\n elems_as_tensors = []\n for i, elem in enumerate(converted_elems):\n if isinstance(elem, core.Tensor):\n elems_as_tensors.append(elem)\n else:\n # NOTE(mrry): This is inefficient, but it enables us to\n # handle the case where the list arguments are other\n # convertible-to-tensor types, such as numpy arrays.\n elems_as_tensors.append(\n constant_op.constant(elem, dtype=dtype, name=str(i)))\n return gen_array_ops.pack(elems_as_tensors, name=scope)\n else:\n return converted_elems\n\n\ndef _get_dtype_from_nested_lists(list_or_tuple):\n \"\"\"Returns the dtype of any tensor-like object in `list_or_tuple`, if found.\n\n Args:\n list_or_tuple: A list or tuple representing an object that can be converted\n to a `tf.Tensor`.\n\n Returns:\n The dtype of any tensor-like object in `list_or_tuple`, or `None` if no\n such object exists.\n \"\"\"\n for elem in list_or_tuple:\n if isinstance(elem, core.Tensor):\n return elem.dtype.base_dtype\n elif isinstance(elem, (list, tuple)):\n maybe_dtype = _get_dtype_from_nested_lists(elem)\n if maybe_dtype is not None:\n return maybe_dtype\n return None\n\n\ndef _cast_nested_seqs_to_dtype(dtype):\n\n def _maybe_cast(elem):\n if isinstance(elem, core.Tensor):\n if dtype != elem.dtype.base_dtype:\n elem = gen_math_ops.cast(elem, dtype)\n return 
elem\n\n return _maybe_cast\n\n\n_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)\n_NON_AUTOPACKABLE_TYPES.add(np.ndarray)\n\n\ndef _should_not_autopack(v):\n # The condition we really want is\n # any(isinstance(elem, core.Tensor))\n # but it is >5x slower due to abc.ABCMeta.__instancecheck__.\n # pylint: disable=unidiomatic-typecheck\n # TODO(slebedev): add nest.all?\n return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))\n # pylint: enable=unidiomatic-typecheck\n\n\ndef _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n \"\"\"Tensor conversion function that automatically packs arguments.\"\"\"\n if as_ref or _should_not_autopack(v):\n return NotImplemented\n inferred_dtype = _get_dtype_from_nested_lists(v)\n if inferred_dtype is None:\n # We did not find any tensor-like objects in the nested lists, so defer to\n # other conversion functions.\n return NotImplemented\n if dtype is None:\n dtype = inferred_dtype\n elif dtype != inferred_dtype:\n v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)\n return _autopacking_helper(v, dtype, name or \"packed\")\n\n\n# pylint: enable=invalid-name\n\n# NOTE: Register this conversion function to run *before* one that\n# assumes every element is a value.\nops.register_tensor_conversion_function((list, tuple),\n _autopacking_conversion_function, 99)\n\n\n@tf_export(\"unstack\")\[email protected]_dispatch_support\ndef unstack(value, num=None, axis=0, name=\"unstack\"):\n \"\"\"Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\n If `num` is not specified (the default), it is inferred from `value`'s shape.\n If `value.shape[axis]` is not known, `ValueError` is raised.\n\n For example, given a tensor of shape `(A, B, C, D)`;\n\n If `axis == 0` then the i'th tensor in `output` is the slice\n `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.\n (Note that the dimension unpacked along is gone, unlike `split`).\n\n If `axis == 1` then the i'th tensor in `output` is the slice\n `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.\n Etc.\n\n This is the opposite of stack.\n\n Args:\n value: A rank `R > 0` `Tensor` to be unstacked.\n num: An `int`. The length of the dimension `axis`. Automatically inferred if\n `None` (the default).\n axis: An `int`. The axis to unstack along. 
Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-R, R)`.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unstacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
    ValueError: If `axis` is out of the range [-R, R).
  """
  if num is None:
    value = ops.convert_to_tensor(value)
    value_shape = value.get_shape()
    if value_shape.ndims is not None:
      if axis < -value_shape.ndims or axis >= value_shape.ndims:
        raise ValueError("axis = %d not in [%d, %d)" %
                         (axis, -value_shape.ndims, value_shape.ndims))
      num = value_shape.dims[axis].value
  if num is None:
    raise ValueError("Cannot infer num from shape %s" % value_shape)
  return gen_array_ops.unpack(value, num=num, axis=axis, name=name)


@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
  """Concatenates tensors along one dimension.

  See also `tf.tile`, `tf.stack`, `tf.repeat`.

  Concatenates the list of tensors `values` along dimension `axis`. If
  `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
  result has shape

      [D0, D1, ... Raxis, ...Dn]

  where

      Raxis = sum(Daxis(i))

  That is, the data from the input tensors is joined along the `axis`
  dimension.

  The number of dimensions of the input tensors must match, and all dimensions
  except `axis` must be equal.

  For example:

  >>> t1 = [[1, 2, 3], [4, 5, 6]]
  >>> t2 = [[7, 8, 9], [10, 11, 12]]
  >>> tf.concat([t1, t2], 0)
  <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
  array([[ 1,  2,  3],
         [ 4,  5,  6],
         [ 7,  8,  9],
         [10, 11, 12]], dtype=int32)>

  >>> tf.concat([t1, t2], 1)
  <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
  array([[ 1,  2,  3,  7,  8,  9],
         [ 4,  5,  6, 10, 11, 12]], dtype=int32)>

  As in Python, `axis` can also be negative. A negative `axis` is
  interpreted as counting from the end of the rank, i.e., as the
  `axis + rank(values)`-th dimension.

  For example:

  >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
  >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
  >>> tf.concat([t1, t2], -1)
  <tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=
  array([[[ 1,  2,  7,  4],
          [ 2,  3,  8,  4]],
         [[ 4,  4,  2, 10],
          [ 5,  3, 15, 11]]], dtype=int32)>

  Note: If you are concatenating along a new axis consider using stack.
  E.g.

  ```python
  tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
  ```

  can be rewritten as

  ```python
  tf.stack(tensors, axis=axis)
  ```
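  Because every non-`axis` dimension must match, a quick shape check can catch
  mismatches early (a minimal sketch; the exact error message varies by
  version):

  ```python
  t1 = tf.zeros([2, 3])
  t2 = tf.zeros([2, 4])
  tf.concat([t1, t2], axis=1).shape  # TensorShape([2, 7]): 3 + 4 along axis 1
  # tf.concat([t1, t2], axis=0)      # would fail: dimension 1 differs (3 vs 4)
  ```

  Args:
    values: A list of `Tensor` objects or a single `Tensor`.
    axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
      in the range `[-rank(values), rank(values))`. As in Python, indexing for
      axis is 0-based. Positive axis in the range of `[0, rank(values))` refers
      to `axis`-th dimension.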
And negative axis refers to `axis +\n rank(values)`-th dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n \"\"\"\n if not isinstance(values, (list, tuple)):\n values = [values]\n # TODO(mrry): Change to return values?\n if len(values) == 1: # Degenerate case of one tensor.\n # Make a throwaway call to convert_to_tensor to make sure\n # that axis is of the correct type, and make sure that\n # the returned tensor is a scalar.\n # TODO(keveman): Implement a standalone type and shape checker.\n with ops.name_scope(name) as scope:\n ops.convert_to_tensor(\n axis, name=\"concat_dim\",\n dtype=dtypes.int32).get_shape().assert_has_rank(0)\n return identity(values[0], name=name)\n return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\n\n\n@tf_export(v1=[\"boolean_mask\"])\[email protected]_dispatch_support\ndef boolean_mask(tensor, mask, name=\"boolean_mask\", axis=None):\n \"\"\"Apply boolean mask to tensor.\n\n Numpy equivalent is `tensor[mask]`.\n\n ```python\n # 1-D example\n tensor = [0, 1, 2, 3]\n mask = np.array([True, False, True, False])\n boolean_mask(tensor, mask) # [0, 2]\n ```\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n The `axis` could be used with `mask` to indicate the axis to mask from.\n In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n See also: `tf.ragged.boolean_mask`, which can be applied to both dense and\n ragged tensors, and can be used if you need to preserve the masked dimensions\n of `tensor` (rather than flattening them, as `tf.boolean_mask` does).\n\n Args:\n tensor: N-D tensor.\n mask: K-D boolean tensor, K <= N and K must be known statically.\n name: A name for this operation (optional).\n axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By\n default, axis is 0 which will mask from the first dimension. Otherwise K +\n axis <= N.\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = np.array([True, False, True])\n boolean_mask(tensor, mask) # [[1, 2], [5, 6]]\n ```\n \"\"\"\n\n def _apply_mask_1d(reshaped_tensor, mask, axis=None):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where_v2(mask), axis=[1])\n return gather(reshaped_tensor, indices, axis=axis)\n\n with ops.name_scope(name, values=[tensor, mask]):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"Number of mask dimensions must be specified, even if some dimensions\"\n \" are None. E.g. 
shape=[None] is ok, but shape=None is not.")
    axis = 0 if axis is None else axis
    axis_value = tensor_util.constant_value(axis)
    if axis_value is not None:
      axis = axis_value
      shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)

    leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
    tensor = reshape(
        tensor,
        concat([
            shape(tensor)[:axis], [leading_size],
            shape(tensor)[axis + ndims_mask:]
        ], 0))
    # TODO(yongtang): tf.reshape in C++ kernel might have set the shape
    # correctly, so the following may not be needed? It still might be
    # possible that there are some edge cases where tensor_util.constant_value
    # resolves more cases than the ShapeInference of tf.reshape in the C++
    # kernel.
    if axis_value is not None:
      first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
      tensor.set_shape(
          tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
              [first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))

    mask = reshape(mask, [-1])
    return _apply_mask_1d(tensor, mask, axis)


@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
  """Apply boolean mask to tensor.

  Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = np.array([True, False, True, False])
  boolean_mask(tensor, mask)  # [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
  the first K dimensions of `tensor`'s shape. We then have:
  `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
  The `axis` could be used with `mask` to indicate the axis to mask from.
  In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
  the first `axis + dim(mask)` dimensions of `tensor`'s shape.

  See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
  ragged tensors, and can be used if you need to preserve the masked dimensions
  of `tensor` (rather than flattening them, as `tf.boolean_mask` does).

  Args:
    tensor: N-D tensor.
    mask: K-D boolean tensor, K <= N and K must be known statically.
    axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
      default, axis is 0 which will mask from the first dimension. Otherwise K +
      axis <= N.
    name: A name for this operation (optional).

  Returns:
    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
    to `True` values in `mask`.

  Raises:
    ValueError: If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
  ```
  """
  return boolean_mask(tensor, mask, name, axis)
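The `axis` argument shifts where masking starts; a minimal sketch of the shape
bookkeeping (`np` is assumed imported, as in the examples above):

```python
t = tf.reshape(tf.range(12), [2, 3, 2])  # shape (2, 3, 2)
m = np.array([True, False, True])        # K = 1 mask applied to dimension 1
tf.boolean_mask(t, m, axis=1).shape      # TensorShape([2, 2, 2])
```

The masked dimension collapses to the number of `True` entries, while the
dimensions before `axis` and after `axis + K` are preserved.


@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices`.

  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
  contains a subset of the slices of `a`.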
Only the slices at indices not\n specified in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices # [12, 26, 37, 45]\n tf.shape(a.values) # [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse.mask(a, [12, 45])\n\n b.indices # [26, 37]\n tf.shape(b.values) # [2, 10]\n ```\n\n Args:\n a: An `IndexedSlices` instance.\n mask_indices: Indices of elements to mask.\n name: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.name_scope(name, \"sparse_mask\", [a, mask_indices]) as name:\n indices = a.indices\n out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\n@tf_export(\"unique\")\[email protected]_dispatch_support\ndef unique(x, out_idx=dtypes.int32, name=None):\n \"\"\"Finds unique elements in a 1-D tensor.\n\n See also `tf.unique_with_counts`.\n\n This operation returns a tensor `y` containing all of the unique elements\n of `x` sorted in the same order that they occur in `x`. This operation\n also returns a tensor `idx` the same size as `x` that contains the index\n of each value of `x` in the unique output `y`. In other words:\n\n\n y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]\n\n Example usage:\n\n >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])\n >>> y, idx = unique(x)\n >>> y\n <tf.Tensor: id=5, shape=(5,), dtype=int32,\n numpy=array([1, 2, 4, 7, 8], dtype=int32)>\n >>> idx\n <tf.Tensor: id=6, shape=(9,), dtype=int32,\n numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>\n\n Args:\n x: A Tensor. 1-D.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (y, idx).\n y: A Tensor. Has the same type as x.\n idx: A Tensor of type out_idx.\n\n \"\"\"\n # TODO(yongtang): switch to v2 once API deprecation\n # period (3 weeks) pass.\n # TODO(yongtang): The documentation should also\n # be updated when switch to v2.\n return gen_array_ops.unique(x, out_idx, name)\n\n\nunique.__doc__ = gen_array_ops.unique.__doc__\n\n\n@tf_export(\"unique_with_counts\")\[email protected]_dispatch_support\ndef unique_with_counts(x, out_idx=dtypes.int32, name=None):\n \"\"\"Finds unique elements in a 1-D tensor.\n\n See also `tf.unique`.\n\n This operation returns a tensor `y` containing all of the unique elements\n of `x` sorted in the same order that they occur in `x`. This operation\n also returns a tensor `idx` the same size as `x` that contains the index\n of each value of `x` in the unique output `y`. Finally, it returns a\n third tensor `count` that contains the count of each element of `y`\n in `x`. 
In other words:\n\n y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]\n\n Example usage:\n\n >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])\n >>> y, idx, count = unique_with_counts(x)\n >>> y\n <tf.Tensor: id=8, shape=(5,), dtype=int32,\n numpy=array([1, 2, 4, 7, 8], dtype=int32)>\n >>> idx\n <tf.Tensor: id=9, shape=(9,), dtype=int32,\n numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>\n >>> count\n <tf.Tensor: id=10, shape=(5,), dtype=int32,\n numpy=array([2, 1, 3, 1, 2], dtype=int32)>\n\n Args:\n x: A Tensor. 1-D.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (y, idx, count).\n y: A Tensor. Has the same type as x.\n idx: A Tensor of type out_idx.\n count: A Tensor of type out_idx.\n\n \"\"\"\n # TODO(yongtang): switch to v2 once API deprecation\n # period (3 weeks) pass.\n # TODO(yongtang): The documentation should also\n # be updated when switch to v2.\n return gen_array_ops.unique_with_counts(x, out_idx, name)\n\n\nunique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__\n\n\n@tf_export(\"split\")\[email protected]_dispatch_support\ndef split(value, num_or_size_splits, axis=0, num=None, name=\"split\"):\n \"\"\"Splits a tensor `value` into a list of sub tensors.\n\n See also `tf.unstack`.\n\n If `num_or_size_splits` is an integer, then `value` is split along the\n dimension `axis` into `num_or_size_splits` smaller tensors. This requires that\n `value.shape[axis]` is divisible by `num_or_size_splits`.\n\n If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into\n `len(num_or_size_splits)` elements. The shape of the `i`-th\n element has the same size as the `value` except along dimension `axis` where\n the size is `num_or_size_splits[i]`.\n\n For example:\n\n >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))\n >>>\n >>> # Split `x` into 3 tensors along dimension 1\n >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)\n >>> tf.shape(s0).numpy()\n array([ 5, 10], dtype=int32)\n >>>\n >>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1\n >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)\n >>> tf.shape(split0).numpy()\n array([5, 4], dtype=int32)\n >>> tf.shape(split1).numpy()\n array([ 5, 15], dtype=int32)\n >>> tf.shape(split2).numpy()\n array([ 5, 11], dtype=int32)\n\n Args:\n value: The `Tensor` to split.\n num_or_size_splits: Either an integer indicating the number of splits along\n `axis` or a 1-D integer `Tensor` or Python list containing the sizes of\n each output tensor along `axis`. If a scalar, then it must evenly divide\n `value.shape[axis]`; otherwise the sum of sizes along the split axis\n must match that of the `value`.\n axis: An integer or scalar `int32` `Tensor`. The dimension along which to\n split. Must be in the range `[-rank(value), rank(value))`. 
Defaults to 0.
    num: Optional, used to specify the number of outputs when it cannot be
      inferred from the shape of `size_splits`.
    name: A name for the operation (optional).

  Returns:
    if `num_or_size_splits` is a scalar returns a list of `num_or_size_splits`
    `Tensor` objects; if `num_or_size_splits` is a 1-D Tensor returns
    `num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from
    splitting `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
  """
  size_splits = ops.convert_to_tensor(num_or_size_splits)
  if isinstance(num_or_size_splits,
                (numbers.Integral, tensor_shape.Dimension)):
    return gen_array_ops.split(
        axis=axis, num_split=num_or_size_splits, value=value, name=name)

  if size_splits._rank() == 0:
    raise ValueError(
        "Rank-0 tensors are not supported as the num_or_size_splits argument "
        "to split. Argument provided: %s" % (num_or_size_splits,))

  if num is None:
    size_splits_shape = size_splits._shape_tuple()
    if size_splits_shape:
      num = size_splits_shape[0]
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)

  return gen_array_ops.split_v(
      value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)


@tf_export("transpose", v1=[])
@dispatch.add_dispatch_support
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
  """Transposes `a`, where `a` is a Tensor.

  Permutes the dimensions according to the value of `perm`.

  The returned tensor's dimension `i` will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank
  of the input tensor. Hence, by default, this operation performs a regular
  matrix transpose on 2-D input Tensors.

  If conjugate is `True` and `a.dtype` is either `complex64` or `complex128`
  then the values of `a` are conjugated and transposed.

  @compatibility(numpy)
  In `numpy` transposes are memory-efficient constant time operations as they
  simply return a new view of the same data with adjusted `strides`.

  TensorFlow does not support strides, so `transpose` returns a new tensor with
  the items permuted.
  @end_compatibility

  For example:

  >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.transpose(x)
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)>

  Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.

  If `x` is complex, setting conjugate=True gives the conjugate transpose:

  >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
  ...                  [4 + 4j, 5 + 5j, 6 + 6j]])
  >>> tf.transpose(x, conjugate=True)
  <tf.Tensor: shape=(3, 2), dtype=complex128, numpy=
  array([[1.-1.j, 4.-4.j],
         [2.-2.j, 5.-5.j],
         [3.-3.j, 6.-6.j]])>

  'perm' is more useful for n-dimensional tensors where n > 2:

  >>> x = tf.constant([[[ 1,  2,  3],
  ...                   [ 4,  5,  6]],
  ...                  [[ 7,  8,  9],
  ...                   [10, 11, 12]]])

  As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`.

  To take the transpose of the matrices in dimension-0 (such as when you are
  transposing matrices where 0 is the batch dimension), you would set
  `perm=[0,2,1]`.

  >>> tf.transpose(x, perm=[0, 2, 1])
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[ 1,  4],
          [ 2,  5],
          [ 3,  6]],
         [[ 7, 10],
          [ 8, 11],
          [ 9, 12]]], dtype=int32)>

  Note: This common operation has a shorthand, `tf.linalg.matrix_transpose`.
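  A compact way to sanity-check a permutation is by shape alone: dimension `i`
  of the result takes its size from dimension `perm[i]` of the input (a
  minimal sketch):

  ```python
  x = tf.zeros([2, 3, 5])
  tf.transpose(x, perm=[1, 2, 0]).shape  # TensorShape([3, 5, 2])
  ```

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.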
This should be a vector.\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.transpose(input)).\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n return transpose(a=a, perm=perm, name=name, conjugate=conjugate)\n\n\n@tf_export(v1=[\"transpose\"])\[email protected]_dispatch_support\ndef transpose(a, perm=None, name=\"transpose\", conjugate=False):\n \"\"\"Transposes `a`.\n\n Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors. If conjugate is True and\n `a.dtype` is either `complex64` or `complex128` then the values of `a`\n are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.transpose(x) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # If x is complex, setting conjugate=True gives the conjugate transpose\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n x = tf.constant([[[ 1, 2, 3],\n [ 4, 5, 6]],\n [[ 7, 8, 9],\n [10, 11, 12]]])\n\n # Take the transpose of the matrices in dimension-0\n # (this common operation has a shorthand `linalg.matrix_transpose`)\n tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],\n # [2, 5],\n # [3, 6]],\n # [[7, 10],\n # [8, 11],\n # [9, 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n conjugate: Optional bool. 
Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.transpose(input)).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"transpose\", [a]) as name:\n if not tensor_util.is_tensor(a):\n a = ops.convert_to_tensor(a, name=\"a\")\n\n if conjugate and a.dtype.is_complex:\n transpose_fn = gen_array_ops.conjugate_transpose\n else:\n transpose_fn = gen_array_ops.transpose\n\n if perm is not None:\n return transpose_fn(a, perm, name=name)\n\n rank = a.shape.rank\n if rank is None:\n perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)\n else:\n perm = np.arange(rank - 1, -1, -1, dtype=np.int32)\n return transpose_fn(a, perm, name=name)\n\n\n# pylint: disable=invalid-name\n@tf_export(\n \"linalg.matrix_transpose\",\n v1=[\"linalg.transpose\", \"linalg.matrix_transpose\", \"matrix_transpose\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_transpose\", \"linalg.transpose\")\ndef matrix_transpose(a, name=\"matrix_transpose\", conjugate=False):\n \"\"\"Transposes last two dimensions of tensor `a`.\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.linalg.matrix_transpose(x) # [[1, 4],\n # [2, 5],\n # [3, 6]]\n\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # Matrix with two batch dimensions.\n # x.shape is [1, 2, 3, 4]\n # tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]\n ```\n\n Note that `tf.matmul` provides kwargs allowing for transpose of arguments.\n This is done with minimal cost, and is preferable to using this function. E.g.\n\n ```python\n # Good! Transpose is taken at minimal additional cost.\n tf.matmul(matrix, b, transpose_b=True)\n\n # Inefficient!\n tf.matmul(matrix, tf.linalg.matrix_transpose(b))\n ```\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, `linalg.matrix_transpose` returns a new\n tensor with the items permuted.\n @end_compatibility\n\n Args:\n a: A `Tensor` with `rank >= 2`.\n name: A name for the operation (optional).\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.linalg.matrix_transpose(input)).\n\n Returns:\n A transposed batch matrix `Tensor`.\n\n Raises:\n ValueError: If `a` is determined statically to have `rank < 2`.\n \"\"\"\n with ops.name_scope(name, values=[a]):\n a = ops.convert_to_tensor(a, name=\"a\")\n\n # If we know the number of dimensions (statically), we can do two things:\n # 1. Check that `a` is a (batch) matrix.\n # 2. Use a Python list for perm. This preserves static shape information\n # and avoids extra computations.\n a_shape = a.get_shape()\n ndims = a_shape.ndims\n if ndims is not None:\n if ndims < 2:\n raise ValueError(\n \"Argument 'a' should be a (batch) matrix, with rank >= 2. 
Found: \"\n \"%s\" % a_shape)\n perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]\n else:\n a_rank = rank(a)\n perm = concat(\n (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)\n\n return transpose(a, perm=perm, conjugate=conjugate)\n\n\n@tf_export(\"linalg.diag\", v1=[\"linalg.diag\", \"matrix_diag\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_diag\")\ndef matrix_diag(diagonal,\n name=\"diag\",\n k=0,\n num_rows=-1,\n num_cols=-1,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns a batched diagonal tensor with given batched diagonal values.\n\n Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\n diagonals of a matrix, with everything else padded with `padding`. `num_rows`\n and `num_cols` specify the dimension of the innermost matrix of the output. If\n both are not specified, the op assumes the innermost matrix is square and\n infers its size from `k` and the innermost dimension of `diagonal`. If only\n one of them is specified, the op assumes the unspecified value is the smallest\n possible based on other criteria.\n\n Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor\n has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only\n one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has\n rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\n The second innermost dimension of `diagonal` has double meaning. When `k` is\n scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and\n the output tensor is:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n ```\n\n Otherwise, `M` is treated as the number of diagonals for the matrix in the\n same batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n ```\n where `d = n - m`, `diag_index = k[1] - d`, and\n `index_in_diag = n - max(d, 0) + offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n For example:\n\n ```\n # The main diagonal.\n diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\n tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n # A superdiagonal (per batch).\n diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)\n [4, 5, 6]])\n tf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n # A tridiagonal band (per batch).\n diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [0, 4, 5]],\n [[2, 3, 0],\n [6, 7, 9],\n [0, 9, 1]]])\n tf.matrix_diag(diagonals, k = (-1, 1))\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n # RIGHT_LEFT alignment.\n diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 2, 3],\n [6, 7, 9],\n [9, 1, 
0]]])\n tf.matrix_diag(diagonals, k = (-1, 1), align=\"RIGHT_LEFT\")\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n # Rectangular matrix.\n diagonal = np.array([1, 2]) # Input shape: (2)\n tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n # Rectangular matrix with inferred num_cols and padding_value = 9.\n tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n ```\n\n Args:\n diagonal: A `Tensor` with `rank k >= 1`.\n name: A name for the operation (optional).\n k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n num_rows: The number of rows of the output matrix. If it is not provided,\n the op assumes the output matrix is a square matrix and infers the matrix\n size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.\n num_cols: The number of columns of the output matrix. If it is not provided,\n the op assumes the output matrix is a square matrix and infers the matrix\n size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.\n padding_value: The value to fill the area outside the specified diagonal\n band with. Default is 0.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n\n Returns:\n A Tensor. 
Has the same type as `diagonal`.\n \"\"\"\n # Special case to sidestep the tf.constant conversion error:\n # TypeError: Expected bool, got 0 of type 'int' instead.\n if hasattr(diagonal, \"dtype\") and diagonal.dtype == \"bool\":\n padding_value = bool(padding_value)\n\n return gen_array_ops.matrix_diag_v3(\n diagonal=diagonal,\n k=k,\n num_rows=num_rows,\n num_cols=num_cols,\n padding_value=padding_value,\n align=align,\n name=name)\n\n\n@tf_export(\"linalg.diag_part\", v1=[\"linalg.diag_part\", \"matrix_diag_part\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_diag_part\")\[email protected]_dispatch_support\ndef matrix_diag_part(\n input, # pylint:disable=redefined-builtin\n name=\"diag_part\",\n k=0,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns the batched diagonal part of a batched tensor.\n\n Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n `input`.\n\n Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\n Let `max_diag_len` be the maximum length among all diagonals to be extracted,\n `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n Let `num_diags` be the number of diagonals to extract,\n `num_diags = k[1] - k[0] + 1`.\n\n If `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n `[I, J, ..., L, max_diag_len]` and values:\n\n ```\n diagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n ```\n where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\n Otherwise, the output tensor has rank `r` with dimensions\n `[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n ```\n diagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n ```\n where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n The input must be at least a matrix.\n\n For example:\n\n ```\n input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n # A main diagonal from each batch.\n tf.linalg.diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n # A superdiagonal from each batch.\n tf.linalg.diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n # A band from each batch.\n tf.linalg.diag_part(input, k = (-1, 2))\n ==> [[[3, 8, 0], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [0, 5, 8]],\n [[3, 4, 0],\n [4, 3, 8],\n [5, 2, 7],\n [0, 1, 6]]]\n\n # RIGHT_LEFT alignment.\n tf.linalg.diag_part(input, k = (-1, 2), align=\"RIGHT_LEFT\")\n ==> [[[0, 3, 8], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [5, 8, 0]],\n [[0, 3, 4],\n [4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n # max_diag_len can be shorter than the main diagonal.\n tf.linalg.diag_part(input, k = (-2, -1))\n ==> [[[5, 8],\n [0, 9]],\n [[1, 6],\n [0, 5]]]\n\n # padding_value = 9\n tf.linalg.diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[4, 9, 9], # Output shape: (2, 3, 3)\n [3, 8, 9],\n [2, 7, 6]],\n [[2, 9, 9],\n [3, 4, 9],\n [4, 3, 8]]]\n\n ```\n\n Args:\n input: A `Tensor` with `rank k >= 2`.\n name: A name for the operation 
(optional).\n k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n padding_value: The value to fill the area outside the specified diagonal\n band with. Default is 0.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n\n Returns:\n A Tensor containing diagonals of `input`. Has the same type as `input`.\n \"\"\"\n # Special case to sidestep the tf.constant conversion error:\n # TypeError: Expected bool, got 0 of type 'int' instead.\n if hasattr(input, \"dtype\") and input.dtype == \"bool\":\n padding_value = bool(padding_value)\n\n return gen_array_ops.matrix_diag_part_v3(\n input=input, k=k, padding_value=padding_value, align=align, name=name)\n\n\n@tf_export(\"linalg.set_diag\", v1=[\"linalg.set_diag\", \"matrix_set_diag\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_set_diag\")\ndef matrix_set_diag(\n input, # pylint:disable=redefined-builtin\n diagonal,\n name=\"set_diag\",\n k=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns a batched matrix tensor with new batched diagonal values.\n\n Given `input` and `diagonal`, this operation returns a tensor with the\n same shape and values as `input`, except for the specified diagonals of the\n innermost matrices. These will be overwritten by the values in `diagonal`.\n\n `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. 
When `k` is scalar or\n `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\n Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\n The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`.\n If `k` is scalar or `k[0] == k[1]`:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n ```\n\n Otherwise,\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n ```\n where `d = n - m`, `diag_index = k[1] - d`, and\n `index_in_diag = n - max(d, 0) + offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n For example:\n\n ```\n # The main diagonal.\n input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\n diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\n tf.matrix_set_diag(input, diagonal)\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n # A superdiagonal (per batch).\n tf.matrix_set_diag(input, diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n # A band of diagonals.\n diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [0, 4, 5]],\n [[1, 2, 0],\n [5, 6, 4],\n [6, 1, 2],\n [0, 3, 4]]])\n tf.matrix_set_diag(input, diagonals, k = (-1, 2))\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n # RIGHT_LEFT alignment.\n diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 1, 2],\n [5, 6, 4],\n [6, 1, 2],\n [3, 4, 0]]])\n tf.matrix_set_diag(input, diagonals, k = (-1, 2), align=\"RIGHT_LEFT\")\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n ```\n\n Args:\n input: A `Tensor` with rank `k + 1`, where `k >= 1`.\n diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,\n otherwise. `k >= 1`.\n name: A name for the operation (optional).\n k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". 
\"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n \"\"\"\n return gen_array_ops.matrix_set_diag_v3(\n input=input, diagonal=diagonal, k=k, align=align, name=name)\n\n\n# pylint: enable=invalid-name\n\n\ndef _constant_if_small(value, shape, dtype, name):\n try:\n if np.prod(shape) < 1000:\n return constant(value, shape=shape, dtype=dtype, name=name)\n except TypeError:\n # Happens when shape is a Tensor, list with Tensor elements, etc.\n pass\n return None\n\n\ndef _tag_zeros_tensor(fun):\n \"\"\" Tags the result of function by setting _is_zeros_tensor attribute.\n\n This is useful to compute Hessians of fused ops such as cross_entropy.\n \"\"\"\n\n def wrapped(*args, **kwargs):\n tensor = fun(*args, **kwargs)\n tensor._is_zeros_tensor = True\n return tensor\n\n return tf_decorator.make_decorator(fun, wrapped)\n\n\n@tf_export(\"zeros\")\[email protected]_dispatch_support\n@_tag_zeros_tensor\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n >>> tf.zeros([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: The DType of an element in the resulting `Tensor`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"zeros\", [shape]) as name:\n if dtype == dtypes.bool:\n zero = False\n elif dtype == dtypes.string:\n zero = \"\"\n else:\n zero = 0\n\n if not isinstance(shape, ops.Tensor):\n try:\n if not context.executing_eagerly():\n # Create a constant if it won't be very big. Otherwise create a fill\n # op to prevent serialized GraphDefs from becoming too large.\n output = _constant_if_small(zero, shape, dtype, name)\n if output is not None:\n return output\n\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(zero, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(v1=[\"zeros_like\"])\[email protected]_dispatch_support\ndef zeros_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. 
Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`. (optional)\n name: A name for the operation (optional).\n optimize: if `True`, attempt to statically determine the shape of `tensor`\n and encode it as a constant. (optional, defaults to `True`)\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n return zeros_like_impl(tensor, dtype, name, optimize)\n\n\n@tf_export(\"zeros_like\", v1=[])\[email protected]_dispatch_support\ndef zeros_like_v2(\n input, # pylint: disable=redefined-builtin\n dtype=None,\n name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor or array-like object (`input`), this operation returns\n a tensor of the same type and shape as `input` with all elements set to zero.\n Optionally, you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n Args:\n input: A `Tensor` or array-like object.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string` (optional).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n return zeros_like_impl(input, dtype, name, optimize=True)\n\n\n@_tag_zeros_tensor\ndef zeros_like_impl(tensor, dtype, name, optimize=True):\n \"\"\"Internal implementation for the v1/v2 zeros_like API calls.\"\"\"\n with ops.name_scope(name, \"zeros_like\", [tensor]) as name:\n if not tensor_util.is_tensor(tensor):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n tensor_shape = tensor.shape\n tensor_dtype = tensor.dtype\n\n if context.executing_eagerly():\n if dtype is not None and dtype != tensor_dtype:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n return gen_array_ops.zeros_like(tensor, name=name)\n\n # For now, variant types must be created via zeros_like; as we need to\n # pass the input variant object to the proper zeros callback.\n\n if (optimize and tensor_shape.is_fully_defined() and\n tensor_dtype != dtypes.variant):\n # We can produce a zeros tensor independent of the value of 'tensor',\n # since the shape is known statically.\n return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)\n\n if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n else:\n return gen_array_ops.zeros_like(tensor, name=name)\n\n\n@tf_export(v1=[\"ones_like\"])\[email protected]_dispatch_support\ndef ones_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to 1.\n\n See also `tf.ones`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,\n `complex128` or `bool`.\n name: A name for the operation (optional).\n optimize: if true, attempt to statically determine the shape of 'tensor' and\n encode it as a constant.\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n return ones_like_impl(tensor, dtype, name, optimize)\n\n\n@tf_export(\"ones_like\", v1=[])\[email protected]_dispatch_support\ndef ones_like_v2(\n input, # pylint: disable=redefined-builtin\n dtype=None,\n name=None):\n \"\"\"Creates a tensor of all ones that has the same shape as the input.\n\n See also `tf.ones`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to 1. Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.ones_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int32)>\n\n Args:\n input: A `Tensor`.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to one.\n \"\"\"\n return ones_like_impl(input, dtype, name, optimize=True)\n\n\ndef ones_like_impl(tensor, dtype, name, optimize=True):\n \"\"\"Internal implementation for the v1/v2 ones_like API calls.\"\"\"\n with ops.name_scope(name, \"ones_like\", [tensor]) as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape_internal(tensor, optimize=optimize)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n if not context.executing_eagerly():\n ret.set_shape(tensor.get_shape())\n return ret\n\n\n@tf_export(\"ones\")\[email protected]_dispatch_support\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to one (1).\n\n See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to one.\n\n >>> tf.ones([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: Optional DType of an element in the resulting `Tensor`. Default is\n `tf.float32`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to one (1).\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"ones\", [shape]) as name:\n one = True if dtype == dtypes.bool else 1\n if not isinstance(shape, ops.Tensor):\n try:\n if not context.executing_eagerly():\n # Create a constant if it won't be very big. Otherwise create a fill\n # op to prevent serialized GraphDefs from becoming too large.\n output = _constant_if_small(one, shape, dtype, name)\n if output is not None:\n return output\n\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(one, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(v1=[\"placeholder\"])\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.compat.v1.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n @compatibility(eager)\n Placeholders are not compatible with eager execution.\n @end_compatibility\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.placeholder() is not compatible with \"\n \"eager execution.\")\n\n return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)\n\n\n@tf_export(v1=[\"placeholder_with_default\"])\ndef placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin\n \"\"\"A placeholder op that passes through `input` when its output is not fed.\n\n Args:\n input: A `Tensor`. The default value to produce when output is not fed.\n shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of\n the tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n return gen_array_ops.placeholder_with_default(input, shape, name)\n\n\n@tf_export(v1=[\"sparse.placeholder\", \"sparse_placeholder\"])\[email protected]_endpoints(\"sparse_placeholder\")\ndef sparse_placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a sparse tensor that will be always fed.\n\n **Important**: This sparse tensor will produce an error if evaluated.\n Its value must be fed using the `feed_dict` optional argument to\n `Session.run()`, `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.compat.v1.sparse.placeholder(tf.float32)\n y = tf.sparse.reduce_sum(x)\n\n with tf.compat.v1.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)\n values = np.array([1.0, 2.0], dtype=np.float32)\n shape = np.array([7, 9, 2], dtype=np.int64)\n print(sess.run(y, feed_dict={\n x: tf.compat.v1.SparseTensorValue(indices, values, shape)})) # Will\n succeed.\n print(sess.run(y, feed_dict={\n x: (indices, values, shape)})) # Will succeed.\n\n sp = tf.sparse.SparseTensor(indices=indices, values=values,\n dense_shape=shape)\n sp_value = sp.eval(session=sess)\n print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.\n ```\n\n @compatibility{eager} Placeholders are not compatible with eager execution.\n\n Args:\n dtype: The type of `values` elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a sparse tensor of any shape.\n name: A name for prefixing the operations (optional).\n\n Returns:\n A `SparseTensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"`sparse_placeholder` is not compatible with \"\n \"eager execution.\")\n\n shape_name = (name + \"/shape\") if name is not None else None\n default_shape_name = (name + \"/shape_default\") if name is not None else None\n if shape is None:\n rank = None\n dense_shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)\n dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)\n else:\n if isinstance(shape, ops.Tensor):\n rank = shape.get_shape()[0]\n dense_shape_default = tensor_util.constant_value_as_shape(shape)\n else:\n rank = len(shape)\n # determine the shape, to override the `.shape` property of the\n # `SparseTensor`\n dense_shape_default = tensor_shape.TensorShape(\n tuple(None if dim == -1 else dim for dim in shape))\n shape = tuple(tensor_shape.dimension_value(dim) for dim in shape)\n shape = tuple(-1 if dim is None else dim for dim in shape)\n shape = ops.convert_to_tensor(\n shape, dtype=dtypes.int64, name=default_shape_name)\n\n # `dense_shape` needs to be feedable (for users that treat this as an\n # actual placeholder). `constant_value_as_shape` sets constants to\n # not-feedable. placeholder_with_default works, but blocks `SparseTensor`\n # from reading the default value back out.\n dense_shape = placeholder_with_default(\n shape, shape=shape.shape, name=shape_name)\n\n result = sparse_tensor.SparseTensor(\n values=placeholder(\n dtype,\n shape=[None],\n name=(name + \"/values\") if name is not None else None),\n indices=placeholder(\n dtypes.int64,\n shape=[None, rank],\n name=(name + \"/indices\") if name is not None else None),\n dense_shape=dense_shape)\n\n # Now the SparseTensor.shape is a list of `None`s, since it couldn't read the\n # default shape out of the placeholder. Override that\n # shape to be the value determined here, so partial shapes can be\n # propagated.\n result._dense_shape_default = dense_shape_default\n return result\n\n# pylint: enable=redefined-outer-name\n\n\n@tf_export(\"pad\", v1=[])\[email protected]_dispatch_support\ndef pad_v2(tensor, paddings, mode=\"CONSTANT\", constant_values=0, name=None):\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. 
If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n return pad(tensor, paddings, mode, name, constant_values)\n\n\n@tf_export(v1=[\"pad\"])\[email protected]_dispatch_support\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None, constant_values=0): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n name: A name for the operation (optional).\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n # Convert lower/mixed case to upper for NumPy compatibility\n # NumPy uses all lower-case modes.\n mode = mode.upper()\n if mode == \"CONSTANT\":\n # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed\n # remove the \"Pad\" fallback here.\n if not tensor_util.is_tensor(constant_values) and constant_values == 0:\n result = gen_array_ops.pad(tensor, paddings, name=name)\n else:\n result = gen_array_ops.pad_v2(\n tensor, paddings, constant_values, name=name)\n elif mode == \"REFLECT\":\n result = gen_array_ops.mirror_pad(\n tensor, paddings, mode=\"REFLECT\", name=name)\n elif mode == \"SYMMETRIC\":\n result = gen_array_ops.mirror_pad(\n tensor, paddings, mode=\"SYMMETRIC\", name=name)\n else:\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n # Restore shape information where possible.\n if not context.executing_eagerly():\n paddings_constant = _get_paddings_constant(paddings)\n input_shape = (\n tensor_shape.TensorShape(tensor.shape)\n if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)\n if (input_shape.ndims is not None and\n not result.shape.is_fully_defined() and paddings_constant is not None):\n new_shape = []\n for padding, dim in zip(paddings_constant, input_shape.as_list()):\n if padding is None or dim is None or any((x is None for x in padding)):\n new_shape.append(None)\n else:\n new_shape.append(sum(padding) + dim)\n result.set_shape(new_shape)\n\n return result\n\n\ndef _get_paddings_constant(paddings):\n \"\"\"Helper to get the constant values of the paddings arg to pad().\n\n Used under V1 graph mode to facilitate computation of the shape of the output\n tensor of `pad()`.\n\n Args:\n paddings: The same paddings arg as passed to pad(). 
Can be a Tensor, or\n a nested list or tuple of Tensor and/or numbers.\n\n Returns:\n A nested list or numbers or `None`, in which `None` indicates unknown\n padding size.\n \"\"\"\n if isinstance(paddings, ops.Tensor):\n return tensor_util.constant_value(paddings, partial=True)\n elif isinstance(paddings, (list, tuple)):\n return [_get_paddings_constant(x) for x in paddings]\n else:\n return paddings\n\n\n@tf_export(\"meshgrid\")\[email protected]_dispatch_support\ndef meshgrid(*args, **kwargs):\n \"\"\"Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` with the tensors\n\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = tf.meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n \"\"\"\n\n indexing = kwargs.pop(\"indexing\", \"xy\")\n name = kwargs.pop(\"name\", \"meshgrid\")\n if kwargs:\n key = list(kwargs.keys())[0]\n raise TypeError(\"'{}' is an invalid keyword argument \"\n \"for this function\".format(key))\n\n if indexing not in (\"xy\", \"ij\"):\n raise ValueError(\"indexing parameter must be either 'xy' or 'ij'\")\n\n with ops.name_scope(name, \"meshgrid\", args) as name:\n ndim = len(args)\n s0 = (1,) * ndim\n\n if not ndim:\n return []\n\n # Prepare reshape by inserting dimensions with size 1 where needed\n output = []\n for i, x in enumerate(args):\n output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))\n # Create parameters for broadcasting each tensor to the full size\n shapes = [size(x) for x in args]\n\n output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype\n\n if indexing == \"xy\" and ndim > 1:\n output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))\n output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))\n shapes[0], shapes[1] = shapes[1], shapes[0]\n\n # TODO(nolivia): improve performance with a broadcast\n mult_fact = ones(shapes, output_dtype)\n return [x * mult_fact for x in output]\n\n\nNEW_AXIS = -1\nSHRINK_AXIS = -2\n\n\n# PEP-8 naming\n# pylint: disable=invalid-name,redefined-outer-name\ndef _compute_size_of_strided_dim(shrink, spec, size):\n \"\"\"Computes the size of a single strided slice dimension.\"\"\"\n\n unknown = None # Document what None means here.\n use_full_range = None # Document other use of None.\n # if this is a shrink axis (i.e. 
a non-range index)\n # it either will produce an error or return 1\n if shrink:\n return 1\n if size is unknown or size.value is unknown:\n return unknown\n size = size.value\n stride = spec.step\n if stride is not unknown:\n if stride == 0:\n return unknown\n stride = spec.step\n valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n # PEP-8 naming\n # pylint: disable=invalid-name\n def canonical(x, c):\n if x is use_full_range:\n return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]\n else:\n x_fwd = size + x if x < 0 else x # make negative indices positive\n return max(valid_range[0], min(valid_range[1], x_fwd))\n\n begin = canonical(spec.start, 0)\n end = canonical(spec.stop, 1)\n interval_length = end - begin\n if interval_length == 0 or ((interval_length < 0) != (stride < 0)):\n return 0\n else:\n remainder = 1 if interval_length % stride != 0 else 0\n return interval_length // stride + remainder\n else:\n return unknown # unknown because stride is unknown\n\n\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)\n # it is a vector of non-negative integers, and (ii) doing so allows\n # us to handle partially-known multiples.\n multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(\n input_shape.ndims)\n if multiples.ndims is None:\n return [tensor_shape.unknown_shape()]\n else:\n output_dims = []\n for dim, multiple in zip(input_shape.dims, multiples.dims):\n output_dims.append(dim // multiple)\n return [tensor_shape.TensorShape(output_dims)]\n\n\n@tf_export(\"edit_distance\")\[email protected]_dispatch_support\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example, given the following input:\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.sparse.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"],\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.sparse.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]],\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n ```\n\n This operation would return the following:\n\n ```python\n # 'output' is a tensor of shape `[2, 2]` with edit distances normalized\n # by 'truth' lengths.\n output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. 
If `True`, normalizes the Levenshtein distance by\n length of `truth`.\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(\n hypothesis,\n (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Hypothesis must be a SparseTensor.\")\n if not isinstance(\n truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Truth must be a SparseTensor.\")\n\n return gen_array_ops.edit_distance(\n hypothesis.indices,\n hypothesis.values,\n hypothesis.dense_shape,\n truth.indices,\n truth.values,\n truth.dense_shape,\n normalize=normalize,\n name=name)\n\n\n@ops.RegisterGradient(\"FakeQuantWithMinMaxArgs\")\ndef _FakeQuantWithMinMaxArgsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxArgs op.\"\"\"\n return fake_quant_with_min_max_args_gradient(\n grad,\n op.inputs[0],\n min=op.get_attr(\"min\"),\n max=op.get_attr(\"max\"),\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\n@ops.RegisterGradient(\"FakeQuantWithMinMaxVars\")\ndef _FakeQuantWithMinMaxVarsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVars op.\"\"\"\n return fake_quant_with_min_max_vars_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\n@ops.RegisterGradient(\"FakeQuantWithMinMaxVarsPerChannel\")\ndef _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVarsPerChannel op.\"\"\"\n return fake_quant_with_min_max_vars_per_channel_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\n@tf_export(\"required_space_to_batch_paddings\")\ndef required_space_to_batch_paddings(input_shape,\n block_shape,\n base_paddings=None,\n name=None):\n \"\"\"Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\n Args:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. Optional name prefix.\n\n Returns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.\n \"\"\"\n with ops.name_scope(name, \"required_space_to_batch_paddings\",\n [input_shape, block_shape]):\n input_shape = ops.convert_to_tensor(\n input_shape, dtype=dtypes.int32, name=\"input_shape\")\n block_shape = ops.convert_to_tensor(\n block_shape, dtype=dtypes.int32, name=\"block_shape\")\n\n block_shape.get_shape().assert_is_fully_defined()\n block_shape.get_shape().assert_has_rank(1)\n num_block_dims = block_shape.get_shape().dims[0].value\n if num_block_dims == 0:\n return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)\n\n input_shape.get_shape().assert_is_compatible_with([num_block_dims])\n\n if base_paddings is not None:\n base_paddings = ops.convert_to_tensor(\n base_paddings, dtype=dtypes.int32, name=\"base_paddings\")\n base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])\n else:\n base_paddings = zeros([num_block_dims, 2], dtypes.int32)\n\n const_block_shape = tensor_util.constant_value(block_shape)\n const_input_shape = tensor_util.constant_value(input_shape)\n const_base_paddings = tensor_util.constant_value(base_paddings)\n if (const_block_shape is not None and const_input_shape is not None and\n const_base_paddings is not None):\n block_shape = const_block_shape\n input_shape = const_input_shape\n base_paddings = const_base_paddings\n\n # Use same expression for both constant and non-constant case.\n pad_start = base_paddings[:, 0]\n orig_pad_end = base_paddings[:, 1]\n full_input_shape = input_shape + pad_start + orig_pad_end\n pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape\n pad_end = orig_pad_end + pad_end_extra\n\n result_paddings = stack(\n [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],\n name=\"paddings\")\n result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],\n name=\"crops\")\n return result_paddings, result_crops\n\n\n
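# Example (an illustrative sketch only, applying the formulas in the\n# docstring above): with input_shape=[4, 5], block_shape=[3, 2] and no\n# base_paddings,\n#\n#   paddings, crops = tf.required_space_to_batch_paddings([4, 5], [3, 2])\n#   # paddings == [[0, 2], [0, 1]], crops == [[0, 2], [0, 1]]\n#\n# since 4 + 2 = 6 is divisible by 3 and 5 + 1 = 6 is divisible by 2.\n\n\n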
@tf_export(v1=[\"nn.space_to_batch\", \"space_to_batch\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints(\"space_to_batch\")\ndef space_to_batch( # pylint: disable=missing-docstring\n input, # pylint: disable=redefined-builtin\n paddings,\n block_size=None,\n name=None,\n block_shape=None): # pylint: disable=redefined-builtin\n block_size = deprecation.deprecated_argument_lookup(\"block_shape\",\n block_shape, \"block_size\",\n block_size)\n result = space_to_batch_nd(\n input,\n paddings=paddings,\n block_shape=np.array([block_size, block_size], dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nspace_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__\n\n\n@tf_export(\"space_to_batch\", \"nn.space_to_batch\", v1=[])\n@dispatch.add_dispatch_support\ndef space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin\n return space_to_batch_nd(input, block_shape, paddings, name)\n\n\nspace_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__\n\n\n
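# Shape sketch (illustration only, not from the upstream docs): with a\n# [1, 4, 4, 1] NHWC input and block_size=2, each 2x2 spatial block is moved\n# into the batch dimension:\n#\n#   x = tf.reshape(tf.range(16), [1, 4, 4, 1])\n#   y = tf.compat.v1.space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)\n#   # y.shape == [4, 2, 2, 1]\n\n\n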
protected]_endpoints(\"space_to_depth\")\ndef space_to_depth(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)\n\n\nspace_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__\n\n\n@tf_export(\"nn.space_to_depth\", v1=[])\[email protected]_dispatch_support\ndef space_to_depth_v2(input, block_size, data_format=\"NHWC\", name=None): # pylint: disable=redefined-builtin\n return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)\n\n\nspace_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__\n\n\n@tf_export(v1=[\"nn.depth_to_space\", \"depth_to_space\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"depth_to_space\")\ndef depth_to_space(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)\n\n\ndepth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__\n\n\n@tf_export(\"nn.depth_to_space\", v1=[])\[email protected]_dispatch_support\ndef depth_to_space_v2(input, block_size, data_format=\"NHWC\", name=None): # pylint: disable=redefined-builtin\n return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)\n\n\ndepth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__\n\n\n@tf_export(v1=[\"batch_to_space\"])\[email protected]_dispatch_support\ndef batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring\n block_size = deprecation.deprecated_argument_lookup(\"block_shape\",\n block_shape, \"block_size\",\n block_size)\n result = batch_to_space_nd(\n input,\n crops=crops,\n block_shape=np.array([block_size, block_size], dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nbatch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__\n\n\n@tf_export(\"batch_to_space\", v1=[])\[email protected]_dispatch_support\ndef batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin\n \"\"\"BatchToSpace for N-D tensors of type T.\n\n This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of\n shape `block_shape + [batch]`, interleaves these blocks back into the grid\n defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the\n same rank as the input. The spatial dimensions of this intermediate result\n are then optionally cropped according to `crops` to produce the output. This\n is the reverse of SpaceToBatch (see `tf.space_to_batch`).\n\n Args:\n input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +\n remaining_shape`, where `spatial_shape` has M dimensions.\n block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following\n types: `int32`, `int64`. All values must be >= 1. For backwards\n compatibility with TF 1.0, this parameter may be an int, in which case it\n is converted to\n `numpy.array([block_shape, block_shape],\n dtype=numpy.int64)`.\n crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the\n following types: `int32`, `int64`. All values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from\n input dimension `i + 1`, which corresponds to spatial dimension `i`.\n It is required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n This operation is equivalent to the following steps:\n 1. 
Reshape `input` to `reshaped` of shape: [block_shape[0], ...,\n block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,\n input_shape[N-1]]\n 2. Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape), input_shape[1], block_shape[0], ...,\n input_shape[M], block_shape[M-1], input_shape[M+1],\n ..., input_shape[N-1]]\n 3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,\n input_shape[M] * block_shape[M-1], input_shape[M+1], ...,\n input_shape[N-1]]\n 4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output\n of shape:\n [batch / prod(block_shape), input_shape[1] *\n block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *\n block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],\n ..., input_shape[N-1]]\n name: A name for the operation (optional).\n\n Examples:\n\n (1) For the following input of shape `[4, 1, 1, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[[1]]],\n [[[2]]],\n [[[3]]],\n [[[4]]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [2]],\n [[3], [4]]]]\n ```\n\n (2) For the following input of shape `[4, 1, 1, 3]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[1, 2, 3]],\n [[4, 5, 6]],\n [[7, 8, 9]],\n [[10, 11, 12]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 3]` and value:\n\n ```python\n x = [[[[1, 2, 3], [4, 5, 6 ]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n (3) For the following\n input of shape `[4, 2, 2, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n x = [[[[1], [3]], [[ 9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n The output tensor has shape `[1, 4, 4, 1]` and value:\n\n ```python\n x = [[[1], [2], [ 3], [ 4]],\n [[5], [6], [ 7], [ 8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n ```\n\n (4) For the following input of shape\n `[8, 1, 3, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:\n\n ```python\n x = [[[[0], [ 1], [ 3]]],\n [[[0], [ 9], [11]]],\n [[[0], [ 2], [ 4]]],\n [[[0], [10], [12]]],\n [[[0], [ 5], [ 7]]],\n [[[0], [13], [15]]],\n [[[0], [ 6], [ 8]]],\n [[[0], [14], [16]]]]\n ```\n\n The output tensor has shape `[2, 2, 4, 1]` and value:\n\n ```python\n x = [[[[ 1], [ 2], [ 3], [ 4]],\n [[ 5], [ 6], [ 7], [ 8]]],\n [[[ 9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n if isinstance(block_shape, int):\n block_shape = np.array([block_shape, block_shape], dtype=np.int64)\n\n return batch_to_space_nd(\n input=input, block_shape=block_shape, crops=crops, name=name)\n\n\n@tf_export(\"one_hot\")\[email protected]_dispatch_support\ndef one_hot(indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"Returns a one-hot tensor.\n\n See also `tf.fill`, `tf.eye`.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`.\n\n `on_value` and `off_value` must have matching data types. 
If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type\n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type\n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer\n to a non-ragged axis. The output will be equivalent to applying 'one_hot' on\n the values of the RaggedTensor, and creating a new RaggedTensor from the\n result.\n\n If `dtype` is not provided, it will attempt to assume the data type of\n `on_value` or `off_value`, if one or both are passed in. If none of\n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the\n value `tf.float32`.\n\n Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,\n etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.\n\n For example:\n\n ```python\n indices = [0, 1, 2]\n depth = 3\n tf.one_hot(indices, depth) # output: [3 x 3]\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n indices = [0, 2, -1, 1]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=5.0, off_value=0.0,\n axis=-1) # output: [4 x 3]\n # [[5.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 5.0], # one_hot(2)\n # [0.0, 0.0, 0.0], # one_hot(-1)\n # [0.0, 5.0, 0.0]] # one_hot(1)\n\n indices = [[0, 2], [1, -1]]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=1.0, off_value=0.0,\n axis=-1) # output: [2 x 2 x 3]\n # [[[1.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 1.0]], # one_hot(2)\n # [[0.0, 1.0, 0.0], # one_hot(1)\n # [0.0, 0.0, 0.0]]] # one_hot(-1)\n\n indices = tf.ragged.constant([[0, 1], [2]])\n depth = 3\n tf.one_hot(indices, depth) # output: [2 x None x 3]\n # [[[1., 0., 0.],\n # [0., 1., 0.]],\n # [[0., 0., 1.]]]\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. 
(default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`\n TypeError: If dtype of `on_value` and `off_value` don't match one another\n \"\"\"\n with ops.name_scope(\n name, \"one_hot\",\n [indices, depth, on_value, off_value, axis, dtype]) as name:\n on_exists = on_value is not None\n off_exists = off_value is not None\n\n if on_exists:\n on_value = ops.convert_to_tensor(on_value, dtype_hint=dtype)\n if off_exists:\n off_value = ops.convert_to_tensor(off_value, dtype_hint=dtype)\n\n on_dtype = on_value.dtype.base_dtype if on_exists else None\n off_dtype = off_value.dtype.base_dtype if off_exists else None\n\n if on_exists or off_exists:\n if dtype is not None:\n # Ensure provided on_value and/or off_value match dtype\n if on_exists and on_dtype != dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype parameter {1}\".format(on_dtype, dtype))\n if off_exists and off_dtype != dtype:\n raise TypeError(\"dtype {0} of off_value does not match \"\n \"dtype parameter {1}\".format(off_dtype, dtype))\n else:\n # dtype not provided: automatically assign it\n dtype = on_dtype if on_exists else off_dtype\n elif dtype is None:\n # None of on_value, off_value, or dtype provided. Default dtype to float32\n dtype = dtypes.float32\n\n if not on_exists:\n # on_value not provided: assign to value 1 of type dtype\n on_value = ops.convert_to_tensor(1, dtype, name=\"on_value\")\n on_dtype = dtype\n if not off_exists:\n # off_value not provided: assign to value 0 of type dtype\n off_value = ops.convert_to_tensor(0, dtype, name=\"off_value\")\n off_dtype = dtype\n\n if on_dtype != off_dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype {1} of off_value\".format(on_dtype, off_dtype))\n\n return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,\n name)\n\n\ndef _all_dimensions(x):\n \"\"\"Returns a 1D-tensor listing all dimensions in x.\"\"\"\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:\n return constant_op.constant(\n np.arange(x.get_shape().ndims), dtype=dtypes.int32)\n if (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.get_shape().is_fully_defined()):\n r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(r), dtype=dtypes.int32)\n\n # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.\n return gen_math_ops._range(0, rank(x), 1)\n\n\n@tf_export(\"sequence_mask\")\[email protected]_dispatch_support\ndef sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):\n \"\"\"Returns a mask tensor representing the first N positions of each cell.\n\n If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has\n dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with\n\n ```\n mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])\n ```\n\n Examples:\n\n ```python\n tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],\n # [True, True, True, False, False],\n # [True, True, False, False, False]]\n\n tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],\n # [True, True, True]],\n # [[True, True, False],\n # [False, False, False]]]\n ```\n\n Args:\n lengths: integer tensor, all its 
values <= maxlen.\n maxlen: scalar integer tensor, size of last dimension of returned tensor.\n Default is the maximum value in `lengths`.\n dtype: output type of the resulting tensor.\n name: name of the op.\n\n Returns:\n A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.\n Raises:\n ValueError: if `maxlen` is not a scalar.\n \"\"\"\n with ops.name_scope(name, \"SequenceMask\", [lengths, maxlen]):\n lengths = ops.convert_to_tensor(lengths)\n\n if maxlen is None:\n maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))\n maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)\n else:\n maxlen = ops.convert_to_tensor(maxlen)\n if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:\n raise ValueError(\"maxlen must be scalar for sequence_mask\")\n\n # The basic idea is to compare a range row vector of size maxlen:\n # [0, 1, 2, 3, 4]\n # to length as a matrix with 1 column: [[1], [3], [2]].\n # Because of broadcasting on both arguments this comparison results\n # in a matrix of size (len(lengths), maxlen)\n row_vector = gen_math_ops._range(\n constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))\n # Since maxlen >= max(lengths), it is safe to use maxlen as a cast\n # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.\n matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)\n result = row_vector < matrix\n\n if dtype is None or result.dtype.base_dtype == dtype.base_dtype:\n return result\n else:\n return gen_math_ops.cast(result, dtype)\n\n\n@tf_export(v1=[\"squeeze\"])\[email protected]_dispatch_support\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"squeeze_dims\")\ndef squeeze(input, axis=None, name=None, squeeze_dims=None):\n # pylint: disable=redefined-builtin\n \"\"\"Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n >>> t = tf.ones([1, 2, 1, 3, 1, 1])\n >>> print(tf.shape(tf.squeeze(t)).numpy())\n [2 3]\n\n Or, to remove specific size 1 dimensions:\n\n >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n >>> t = tf.ones([1, 2, 1, 3, 1, 1])\n >>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())\n [1 2 3 1]\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n squeeze_dims: Deprecated keyword argument that is now axis.\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: When both `squeeze_dims` and `axis` are specified.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"squeeze_dims\",\n squeeze_dims)\n if np.isscalar(axis):\n axis = [axis]\n return gen_array_ops.squeeze(input, axis, name)\n\n\n@tf_export(\"squeeze\", v1=[])\[email protected]_dispatch_support\ndef squeeze_v2(input, axis=None, name=None):\n \"\"\"Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t)) # [2, 3]\n ```\n\n Or, to remove specific size 1 dimensions:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]\n ```\n\n Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a\n deprecated `squeeze_dims` argument.\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: The input cannot be converted to a tensor, or the specified\n axis cannot be squeezed.\n \"\"\"\n # pylint: disable=redefined-builtin\n return squeeze(input, axis, name)\n\n\n@tf_export(v1=[\"where\"])\[email protected]_dispatch_support\ndef where(condition, x=None, y=None, name=None):\n \"\"\"Return the elements, either from `x` or `y`, depending on the `condition`.\n\n If both `x` and `y` are None, then this operation returns the coordinates of\n true elements of `condition`. The coordinates are returned in a 2-D tensor\n where the first dimension (rows) represents the number of true elements, and\n the second dimension (columns) represents the coordinates of the true\n elements. Keep in mind, the shape of the output tensor can vary depending on\n how many true values there are in input. Indices are output in row-major\n order.\n\n If both non-None, `x` and `y` must have the same shape.\n The `condition` tensor must be a scalar if `x` and `y` are scalar.\n If `x` and `y` are tensors of higher rank, then `condition` must be either a\n vector with size matching the first dimension of `x`, or must have the same\n shape as `x`.\n\n The `condition` tensor acts as a mask that chooses, based on the value at each\n element, whether the corresponding element / row in the output should be taken\n from `x` (if true) or `y` (if false).\n\n If `condition` is a vector and `x` and `y` are higher rank matrices, then it\n chooses which row (outer dimension) to copy from `x` and `y`. 
If `condition`\n has the same shape as `x` and `y`, then it chooses which element to copy from\n `x` and `y`.\n\n Args:\n condition: A `Tensor` of type `bool`\n x: A Tensor which may have the same shape as `condition`. If `condition` is\n rank 1, `x` may have higher rank, but its first dimension must match the\n size of `condition`.\n y: A `tensor` with the same shape and type as `x`.\n name: A name of the operation (optional)\n\n Returns:\n A `Tensor` with the same type and shape as `x`, `y` if they are non-None.\n Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None.\n \"\"\"\n if x is None and y is None:\n with ops.name_scope(name, \"Where\", [condition]) as name:\n condition = ops.convert_to_tensor(\n condition, preferred_dtype=dtypes.bool, name=\"condition\")\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select(condition=condition, x=x, y=y, name=name)\n else:\n raise ValueError(\"x and y must both be non-None or both be None.\")\n\n\n@tf_export(\"where\", v1=[\"where_v2\"])\[email protected]_dispatch_support\ndef where_v2(condition, x=None, y=None, name=None):\n \"\"\"Return the elements where `condition` is `True` (multiplexing `x` and `y`).\n\n This operator has two modes: in one mode both `x` and `y` are provided, in\n another mode neither are provided. `condition` is always expected to be a\n `tf.Tensor` of type `bool`.\n\n #### Retrieving indices of `True` elements\n\n If `x` and `y` are not provided (both are None):\n\n `tf.where` will return the indices of `condition` that are `True`, in\n the form of a 2-D tensor with shape (n, d).\n (Where n is the number of matching indices in `condition`,\n and d is the number of dimensions in `condition`).\n\n Indices are output in row-major order.\n\n >>> tf.where([True, False, False, True])\n <tf.Tensor: shape=(2, 1), dtype=int64, numpy=\n array([[0],\n [3]])>\n\n >>> tf.where([[True, False], [False, True]])\n <tf.Tensor: shape=(2, 2), dtype=int64, numpy=\n array([[0, 0],\n [1, 1]])>\n\n >>> tf.where([[[True, False], [False, True], [True, True]]])\n <tf.Tensor: shape=(4, 3), dtype=int64, numpy=\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1]])>\n\n #### Multiplexing between `x` and `y`\n\n If `x` and `y` are provided (both have non-None values):\n\n `tf.where` will choose an output shape from the shapes of `condition`, `x`,\n and `y` that all three shapes are\n [broadcastable](https://docs.scipy.org/doc/numpy/reference/ufuncs.html) to.\n\n The `condition` tensor acts as a mask that chooses whether the corresponding\n element / row in the output should be taken from `x`\n (if the element in `condition is True) or `y` (if it is false).\n\n >>> tf.where([True, False, False, True], [1,2,3,4], [100,200,300,400])\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 200, 300, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], [1,2,3,4], [100])\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], 1, 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 1],\n dtype=int32)>\n\n >>> tf.where(True, [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4],\n dtype=int32)>\n >>> tf.where(False, [1,2,3,4], 100)\n 
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([100, 100, 100, 100],\n dtype=int32)>\n\n Args:\n condition: A `tf.Tensor` of type `bool`\n x: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `y`.\n y: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `x`.\n name: A name of the operation (optional).\n\n Returns:\n If `x` and `y` are provided:\n A `Tensor` with the same type as `x` and `y`, and shape that\n is broadcast from `condition`, `x`, and `y`.\n Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None, or the shapes\n are not all broadcastable.\n \"\"\"\n if x is None and y is None:\n with ops.name_scope(name, \"Where\", [condition]) as name:\n condition = ops.convert_to_tensor(\n condition, preferred_dtype=dtypes.bool, name=\"condition\")\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)\n else:\n raise ValueError(\"x and y must both be non-None or both be None.\")\n\n\n# pylint: disable=redefined-builtin\n@tf_export(v1=[\"reverse_sequence\"])\[email protected]_args(None,\n \"seq_dim is deprecated, use seq_axis instead\",\n \"seq_dim\")\[email protected]_args(None,\n \"batch_dim is deprecated, use batch_axis instead\",\n \"batch_dim\")\ndef reverse_sequence(input,\n seq_lengths,\n seq_axis=None,\n batch_axis=None,\n name=None,\n seq_dim=None,\n batch_dim=None):\n \"\"\"Reverses variable length slices.\n\n This op first slices `input` along the dimension `batch_axis`, and for\n each slice `i`, reverses the first `seq_lengths[i]` elements along the\n dimension `seq_axis`.\n\n The elements of `seq_lengths` must obey `seq_lengths[i] <=\n input.dims[seq_axis]`, and `seq_lengths` must be a vector of length\n `input.dims[batch_axis]`.\n\n The output slice `i` along dimension `batch_axis` is then given by\n input slice `i`, with the first `seq_lengths[i]` slices along\n dimension `seq_axis` reversed.\n\n Example usage:\n\n >>> seq_lengths = [7, 2, 3, 5]\n >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],\n ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]\n >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)\n >>> output\n <tf.Tensor: shape=(4, 8), dtype=int32, numpy=\n array([[0, 0, 5, 4, 3, 2, 1, 0],\n [2, 1, 0, 0, 0, 0, 0, 0],\n [3, 2, 1, 4, 0, 0, 0, 0],\n [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>\n\n Args:\n input: A `Tensor`. The input to reverse.\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`,\n `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <=\n input.dims(seq_axis)`\n seq_axis: An `int`. The dimension which is partially reversed.\n batch_axis: An optional `int`. Defaults to `0`. The dimension along which\n reversal is performed.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. 
Has the same type as input.\n \"\"\"\n seq_axis = deprecation.deprecated_argument_lookup(\"seq_axis\", seq_axis,\n \"seq_dim\", seq_dim)\n batch_axis = deprecation.deprecated_argument_lookup(\"batch_axis\", batch_axis,\n \"batch_dim\", batch_dim)\n return gen_array_ops.reverse_sequence(\n input=input,\n seq_lengths=seq_lengths,\n seq_dim=seq_axis,\n batch_dim=batch_axis,\n name=name)\n\n\n@tf_export(\"reverse_sequence\", v1=[])\ndef reverse_sequence_v2(input,\n seq_lengths,\n seq_axis=None,\n batch_axis=None,\n name=None):\n return gen_array_ops.reverse_sequence(\n input=input,\n seq_lengths=seq_lengths,\n seq_dim=seq_axis,\n batch_dim=batch_axis,\n name=name)\n\nreverse_sequence_v2.__doc__ = reverse_sequence.__doc__\n# pylint: enable=redefined-builtin\n\n\n@tf_export(v1=[\"gather\"])\[email protected]_dispatch_support\ndef gather(params,\n indices,\n validate_indices=None,\n name=None,\n axis=None,\n batch_dims=0): # pylint: disable=g-doc-args\n r\"\"\"Gather slices from params axis `axis` according to indices.\n\n Gather slices from params axis `axis` according to `indices`. `indices` must\n be an integer tensor of any dimension (usually 0-D or 1-D).\n\n For 0-D (scalar) `indices`:\n\n $$\\begin{align*}\n output[p_0, ..., p_{axis-1}, && &&& p_{axis + 1}, ..., p_{N-1}] = \\\\\n params[p_0, ..., p_{axis-1}, && indices, &&& p_{axis + 1}, ..., p_{N-1}]\n \\end{align*}$$\n\n Where *N* = `ndims(params)`.\n\n For 1-D (vector) `indices` with `batch_dims=0`:\n\n $$\\begin{align*}\n output[p_0, ..., p_{axis-1}, && &i, &&p_{axis + 1}, ..., p_{N-1}] =\\\\\n params[p_0, ..., p_{axis-1}, && indices[&i], &&p_{axis + 1}, ..., p_{N-1}]\n \\end{align*}$$\n\n In the general case, produces an output tensor where:\n\n $$\\begin{align*}\n output[p_0, &..., p_{axis-1}, &\n &i_{B}, ..., i_{M-1}, &\n p_{axis + 1}, &..., p_{N-1}] = \\\\\n params[p_0, &..., p_{axis-1}, &\n indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], &\n p_{axis + 1}, &..., p_{N-1}]\n \\end{align*}$$\n\n Where *N* = `ndims(params)`, *M* = `ndims(indices)`, and *B* = `batch_dims`.\n Note that `params.shape[:batch_dims]` must be identical to\n `indices.shape[:batch_dims]`.\n\n The shape of the output tensor is:\n\n > `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +\n > params.shape[axis + 1:]`.\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, a 0 is stored in the corresponding\n output value.\n\n See also `tf.gather_nd`.\n\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/Gather.png\"\n alt>\n </div>\n\n Args:\n params: The `Tensor` from which to gather values. Must be at least rank\n `axis + 1`.\n indices: The index `Tensor`. Must be one of the following types: `int32`,\n `int64`. Must be in range `[0, params.shape[axis])`.\n validate_indices: Deprecated, does nothing.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n batch_dims: An `integer`. The number of batch dimensions. Must be less\n than or equal to `rank(indices)`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `params`.\n \"\"\"\n del validate_indices\n\n if axis is None:\n axis = batch_dims\n if tensor_util.constant_value(axis) != 0:\n return gen_array_ops.gather_v2(\n params, indices, axis, batch_dims=batch_dims, name=name)\n try:\n # TODO(apassos) find a less bad way of detecting resource variables\n # without introducing a circular dependency.\n return params.sparse_read(indices, name=name)\n except AttributeError:\n return gen_array_ops.gather_v2(params, indices, axis, name=name)\n\n\n@tf_export(\"gather\", v1=[])\n@dispatch.add_dispatch_support\ndef gather_v2(params,\n indices,\n validate_indices=None,\n axis=None,\n batch_dims=0,\n name=None):\n return gather(\n params,\n indices,\n validate_indices=validate_indices,\n name=name,\n axis=axis,\n batch_dims=batch_dims)\n\n\ngather_v2.__doc__ = gather.__doc__\n\n\n
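# For example (illustration only): with batch_dims=1 each row of `indices`\n# gathers from the corresponding row of `params`:\n#\n#   params = tf.constant([[10, 11, 12], [20, 21, 22]])\n#   tf.gather(params, [[2, 0], [0, 1]], batch_dims=1)\n#   # -> [[12, 10], [20, 21]]\n\n\n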
@tf_export(v1=[\"batch_gather\"])\n@dispatch.add_dispatch_support\n@deprecation.deprecated(\n \"2017-10-25\", \"`tf.batch_gather` is deprecated, please use `tf.gather` \"\n \"with `batch_dims=-1` instead.\") # pylint: disable=missing-docstring\ndef batch_gather(params, indices, name=None):\n \"\"\"Gather slices from params according to indices with leading batch dims.\"\"\"\n with ops.name_scope(name, \"BatchGather\", [params, indices]):\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n if indices.shape.ndims is None:\n raise ValueError(\n \"batch_gather does not allow indices with unknown shape.\")\n return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)\n\n\ndef _batch_gather(params, indices, batch_dims, axis=None):\n r\"\"\"Gather slices from params according to indices with leading batch dims.\n\n This operation assumes that the leading `batch_dims` dimensions of `indices`\n and `params` are batch dimensions; and performs a `tf.gather` operation within\n each batch. (If `batch_dims` is not specified, then it defaults to\n `rank(indices)-1`.) In the case in which `batch_dims==0`, this operation\n is equivalent to `tf.gather`.\n\n Args:\n params: A Tensor. The tensor from which to gather values.\n indices: A Tensor. Must be one of the following types: int32, int64. Index\n tensor. Must be in range `[0, params.shape[batch_dims]]`.\n batch_dims: An integer or none. The number of batch dimensions. Must be\n less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n\n Returns:\n A Tensor. Has the same type as `params`.\n\n Raises:\n ValueError: if `indices` has an unknown shape.\n \"\"\"\n if batch_dims is not None and not isinstance(batch_dims, int):\n raise TypeError(\"batch_dims must be an int; got %r\" % (batch_dims,))\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n\n indices_ndims = indices.shape.ndims\n if indices_ndims is None:\n raise ValueError(\"tf.gather does not allow indices with unknown \"\n \"rank when batch_dims is specified.\")\n if batch_dims is None:\n batch_dims = indices_ndims - 1\n if batch_dims < 0:\n batch_dims += indices_ndims\n if batch_dims < 0 or batch_dims >= indices_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(indices) = %d\" %\n (batch_dims, indices_ndims))\n if params.shape.ndims is not None and batch_dims >= params.shape.ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(params) = %d\" %\n (batch_dims, params.shape.ndims))\n\n # Handle axis by transposing the axis dimension to be the first non-batch\n # dimension, recursively calling batch_gather with axis=0, and then\n # transposing the result to put the pre-axis dimensions before the indices\n # dimensions.\n if axis is not None and axis != batch_dims:\n # Adjust axis to be positive.\n if not isinstance(axis, int):\n axis = where(axis < 0, axis + rank(params), axis)\n elif axis < 0 and params.shape.ndims is None:\n axis = axis + rank(params)\n else:\n if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):\n raise ValueError(\"axis (%d) out of range [%d, %d)\" %\n (axis, -params.shape.ndims, params.shape.ndims))\n if axis < 0:\n axis += params.shape.ndims\n if axis < batch_dims:\n raise ValueError(\"batch_dims = %d must be less than or equal to \"\n \"axis = %d\" % (batch_dims, axis))\n\n # Move params[axis] up to params[batch_dims].\n perm = [\n list(range(batch_dims)), [axis],\n gen_math_ops._range(batch_dims, axis, 1),\n gen_math_ops._range(axis + 1, rank(params), 1)\n ]\n params = transpose(params, concat(perm, axis=0))\n\n result = _batch_gather(params, indices, batch_dims=batch_dims)\n\n # Move the result dimensions corresponding to params[batch_dims:axis]\n # to just before the dimensions corresponding to indices[batch_dims:].\n params_start = indices_ndims + axis - batch_dims\n perm = [\n list(range(batch_dims)),\n gen_math_ops._range(indices_ndims, params_start, 1),\n list(range(batch_dims, indices_ndims)),\n gen_math_ops._range(params_start, rank(result), 1)\n ]\n return transpose(result, perm=concat(perm, axis=0))\n\n indices_shape = shape(indices)\n params_shape = shape(params)\n batch_indices = indices\n indices_dtype = indices.dtype.base_dtype\n accum_dim_value = ones((), dtype=indices_dtype)\n # Use correct type for offset index computation\n casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)\n for dim in range(batch_dims, 0, -1):\n dim_value = casted_params_shape[dim - 1]\n accum_dim_value *= casted_params_shape[dim]\n start = zeros((), dtype=indices_dtype)\n step = ones((), dtype=indices_dtype)\n dim_indices = gen_math_ops._range(start, dim_value, step)\n dim_indices *= accum_dim_value\n dim_shape = stack(\n [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)\n batch_indices += reshape(dim_indices, dim_shape)\n\n flat_indices = reshape(batch_indices, [-1])\n outer_shape = params_shape[batch_dims + 1:]\n flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],\n False)\n\n
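 # The loop above folded the batch coordinates into `batch_indices` as\n # offsets, so a single gather over params (reshaped to collapse the batch\n # dimensions into one leading axis), followed by a reshape, recovers the\n # batched result.\n 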
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],\n axis=0))\n flat_result = gather(flat_params, flat_indices)\n result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))\n final_shape = indices.get_shape()[:batch_dims].merge_with(\n params.get_shape()[:batch_dims])\n final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])\n final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])\n result.set_shape(final_shape)\n return result\n\n\n@tf_export(v1=[\"gather_nd\", \"manip.gather_nd\"])\[email protected]_dispatch_support\n@deprecated_endpoints(\"manip.gather_nd\")\ndef gather_nd(params, indices, name=None, batch_dims=0):\n r\"\"\"Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n `indices` is an K-dimensional integer tensor, best thought of as a\n (K-1)-dimensional tensor of indices into `params`, where each element defines\n a slice of `params`:\n\n output[\\\\(i_0, ..., i_{K-2}\\\\)] = params[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\n\n Whereas in `tf.gather` `indices` defines slices into the first\n dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\n first `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\n The last dimension of `indices` can be at most the rank of\n `params`:\n\n indices.shape[-1] <= params.rank\n\n The last dimension of `indices` corresponds to elements\n (if `indices.shape[-1] == params.rank`) or slices\n (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\n of `params`. The output tensor has shape\n\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\n\n Additionally both 'params' and 'indices' can have M leading batch\n dimensions that exactly match. In this case 'batch_dims' must be M.\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n\n Some examples below.\n\n Simple indexing into a matrix:\n\n ```python\n indices = [[0, 0], [1, 1]]\n params = [['a', 'b'], ['c', 'd']]\n output = ['a', 'd']\n ```\n\n Slice indexing into a matrix:\n\n ```python\n indices = [[1], [0]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['c', 'd'], ['a', 'b']]\n ```\n\n Indexing into a 3-tensor:\n\n ```python\n indices = [[1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['a1', 'b1'], ['c1', 'd1']]]\n\n\n indices = [[0, 1], [1, 0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n\n indices = [[0, 0, 1], [1, 0, 1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = ['b0', 'b1']\n ```\n\n The examples below are for the case when only indices have leading extra\n dimensions. 
If both 'params' and 'indices' have leading batch dimensions, use\n the 'batch_dims' parameter to run gather_nd in batch mode.\n\n Batched indexing into a matrix:\n\n ```python\n indices = [[[0, 0]], [[0, 1]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['a'], ['b']]\n ```\n\n Batched slice indexing into a matrix:\n\n ```python\n indices = [[[1]], [[0]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [[['c', 'd']], [['a', 'b']]]\n ```\n\n Batched indexing into a 3-tensor:\n\n ```python\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[[['a1', 'b1'], ['c1', 'd1']]],\n [[['a0', 'b0'], ['c0', 'd0']]]]\n\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0'], ['a1', 'b1']],\n [['a0', 'b0'], ['c1', 'd1']]]\n\n\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['b0', 'b1'], ['d0', 'c1']]\n ```\n\n Examples with batched 'params' and 'indices':\n\n ```python\n batch_dims = 1\n indices = [[1], [0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n batch_dims = 1\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0']], [['a1', 'b1']]]\n\n batch_dims = 1\n indices = [[[1, 0]], [[0, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0'], ['b1']]\n ```\n\n See also `tf.gather`.\n\n Args:\n params: A `Tensor`. The tensor from which to gather values.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.\n\n Returns:\n A `Tensor`. 
Has the same type as `params`.\n \"\"\"\n batch_dims_ = tensor_util.constant_value(batch_dims)\n if batch_dims_ is not None:\n batch_dims = int(batch_dims_)\n if batch_dims == 0:\n try:\n # TODO(apassos) find a less bad way of detecting resource variables\n # without introducing a circular dependency.\n return params.gather_nd(indices, name=name)\n except AttributeError:\n return gen_array_ops.gather_nd(params, indices, name=name)\n else:\n return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)\n\n\n@tf_export(\"gather_nd\", v1=[])\[email protected]_dispatch_support\ndef gather_nd_v2(params, indices, batch_dims=0, name=None):\n return gather_nd(params, indices, name=name, batch_dims=batch_dims)\n\n\ngather_nd_v2.__doc__ = gather_nd.__doc__\n\n\ndef batch_gather_nd(params, indices, batch_dims, name=None):\n \"\"\"gather_nd implementation with batch support.\"\"\"\n with ops.name_scope(name, \"BatchGatherND\", [params, indices]):\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n\n if not isinstance(batch_dims, int):\n raise TypeError(\"batch_dims must be an int; got %r\" % (batch_dims,))\n if batch_dims < 0:\n raise ValueError(\"tf.gather_nd does not allow negative batch_dims.\")\n params_ndims = params.shape.ndims\n indices_ndims = indices.shape.ndims\n if indices_ndims is not None and batch_dims >= indices_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(indices) = %d\" %\n (batch_dims, indices_ndims))\n if params_ndims is not None and batch_dims >= params_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(params) = %d\" %\n (batch_dims, params_ndims))\n\n expand = batch_dims == 0\n if expand:\n # Normally gather_nd will be called when batch_dims == 0.\n # But if this function is called with batch_dims = 0, e.g. for testing\n # purposes, this adds a dummy batch dimension to make batch_dims = 1.\n params = expand_dims(params, axis=0)\n indices = expand_dims(indices, axis=0)\n batch_dims = 1\n\n params_shape = shape(params)\n indices_shape = shape(indices)\n batch_shape = params_shape[:batch_dims]\n batch_size = gen_math_ops.prod(batch_shape, [0])\n index_internal_ndims = rank(indices) - batch_dims - 1\n indices_internal_shape = indices_shape[batch_dims:-1]\n\n # Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'\n # with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify\n # 'indices' s.t. 
it has shape [i1, ..., iK, D], where D <= M + N and slices\n # to the entire 'params' tensor.\n # Assuming we have a batch of shape [B1, B2], we use meshgrid to create a\n # grid of size B1 x B2.\n batch_dim_list = unstack(batch_shape, axis=0)\n dim_ranges = [\n gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)\n for x in batch_dim_list\n ]\n mesh_list = meshgrid(*dim_ranges, indexing=\"ij\") if dim_ranges else []\n # Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.\n flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]\n index_grid = transpose(stack(flat_list, axis=0))\n # We need to concatenate these batch coordinates with the internal indices.\n # concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]\n # So we reshape them both to [(B1.B2), i1, ..., iK, *]\n index_grid_shape = shape(index_grid)\n index_grid = reshape(\n index_grid,\n concat([\n index_grid_shape[:1],\n ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]\n ],\n axis=0))\n tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)\n index_grid = tile(index_grid, multiples=tile_shape)\n # index_grid now has shape [(B1.B2), i1, ..., iK, 2]\n flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)\n flat_indices = reshape(indices, shape=flat_shape)\n # flat_indices now has shape [(B1.B2), i1, ..., iK, C]\n indices = concat((index_grid, flat_indices), axis=-1)\n # indices has shape [(B1.B2), i1, ..., iK, 2+C]\n out = gen_array_ops.gather_nd(params, indices)\n # out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to\n # its original form.\n out_shape = shape(out)\n out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))\n if expand:\n out = squeeze(out, axis=0)\n return out\n\n\n# Define quantize_v2 here in order to make name the second-to-last attribute,\n# because round_mode was added later.\n# (And also now because of 'axis' processing).\n@tf_export(v1=[\"quantize_v2\"])\[email protected]_dispatch_support\[email protected](\n \"2017-10-25\",\n \"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` \"\n \"instead.\") # pylint: disable=missing-docstring\ndef quantize_v2(\n input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n name=None,\n round_mode=\"HALF_AWAY_FROM_ZERO\",\n narrow_range=False,\n axis=None,\n ensure_minimum_range=0.01):\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n if ensure_minimum_range != 0.01:\n return gen_array_ops.quantize_v2(\n input,\n min_range,\n max_range,\n T=T,\n mode=mode,\n name=name,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis,\n ensure_minimum_range=ensure_minimum_range)\n return gen_array_ops.quantize_v2(\n input,\n min_range,\n max_range,\n T=T,\n mode=mode,\n name=name,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis)\n\n\nquantize_v2.__doc__ = \"\"\"Please use `tf.quantization.quantize` instead.\"\"\"\n\n\n# We want to expose tf.quantization.quantize instead of\n# tf.quantization.quantize; we can deprecate tf.quantization.quantize in next\n# version of TensorFlow.\n@tf_export(\"quantization.quantize\", v1=[\"quantization.quantize\", \"quantize\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"quantize\")\ndef quantize(\n input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n 
round_mode=\"HALF_AWAY_FROM_ZERO\",\n name=None,\n narrow_range=False,\n axis=None,\n ensure_minimum_range=0.01):\n \"\"\"Quantize the input tensor.\"\"\"\n if ensure_minimum_range != 0.01:\n return quantize_v2(\n input,\n min_range,\n max_range,\n T,\n mode=mode,\n round_mode=round_mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis,\n ensure_minimum_range=ensure_minimum_range)\n return quantize_v2(\n input,\n min_range,\n max_range,\n T,\n mode=mode,\n round_mode=round_mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis)\n\n\n@tf_export(\"quantization.dequantize\", v1=[\"quantization.dequantize\",\n \"dequantize\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"dequantize\")\ndef dequantize( # pylint: disable=missing-docstring\n input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n mode=\"MIN_COMBINED\",\n name=None,\n axis=None,\n narrow_range=False,\n dtype=dtypes.float32):\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n if axis >= 0 or narrow_range:\n return gen_array_ops.dequantize(\n input,\n min_range,\n max_range,\n mode=mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis,\n dtype=dtype)\n return gen_array_ops.dequantize(\n input, min_range, max_range, mode=mode, name=name, dtype=dtype)\n\n\ndequantize.__doc__ = gen_array_ops.dequantize.__doc__\n\n\n@tf_export(\"quantization.quantize_and_dequantize\")\[email protected]_dispatch_support\ndef quantize_and_dequantize(\n input, # pylint: disable=redefined-builtin\n input_min,\n input_max,\n signed_input=True,\n num_bits=8,\n range_given=False,\n round_mode=\"HALF_TO_EVEN\",\n name=None,\n narrow_range=False,\n axis=None):\n \"\"\"Quantizes then dequantizes a tensor.\n\n Args:\n input: A `Tensor` to quantize and dequantize.\n input_min: If range_given=True, the minimum input value, that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of minimum values for each slice along axis.\n input_max: If range_given=True, the maximum input value that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of maximum values for each slice along axis.\n signed_input: True if the quantization is signed or unsigned.\n num_bits: The bitwidth of the quantization.\n range_given: If true use `input_min` and `input_max` for the range of the\n input, otherwise determine min and max from the input `Tensor`.\n round_mode: Rounding mode when rounding from float values to quantized ones.\n one of ['HALF_TO_EVEN', 'HALF_UP']\n name: Optional name for the operation.\n narrow_range: If true, then the absolute value of the quantized minimum\n value is the same as the quantized maximum value, instead of 1 greater.\n i.e. for 8 bit quantization, the minimum value is -127 instead of -128.\n axis: Integer. If specified, refers to a dimension of the input tensor, such\n that quantization will be per slice along that dimension.\n\n Returns:\n A `Tensor`. 
Each element is the result of quantizing and dequantizing the\n corresponding element of `input`.\n \"\"\"\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n return gen_array_ops.quantize_and_dequantize_v2(\n input,\n input_min=input_min,\n input_max=input_max,\n signed_input=signed_input,\n num_bits=num_bits,\n range_given=range_given,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis,\n name=name)\n\n\n@tf_export(\"searchsorted\")\[email protected]_dispatch_support\ndef searchsorted(sorted_sequence,\n values,\n side=\"left\",\n out_type=dtypes.int32,\n name=None):\n \"\"\"Searches input tensor for values on the innermost dimension.\n\n A 2-D example:\n\n ```\n sorted_sequence = [[0, 3, 9, 9, 10],\n [1, 2, 3, 4, 5]]\n values = [[2, 4, 9],\n [0, 2, 6]]\n\n result = searchsorted(sorted_sequence, values, side=\"left\")\n\n result == [[1, 2, 2],\n [0, 1, 5]]\n\n result = searchsorted(sorted_sequence, values, side=\"right\")\n\n result == [[1, 2, 4],\n [0, 2, 5]]\n ```\n\n Args:\n sorted_sequence: N-D `Tensor` containing a sorted sequence.\n values: N-D `Tensor` containing the search values.\n side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to\n upper_bound.\n out_type: The output type (`int32` or `int64`). Default is `tf.int32`.\n name: Optional name for the operation.\n\n Returns:\n An N-D `Tensor` the size of values containing the result of applying either\n lower_bound or upper_bound (depending on side) to each value. The result\n is not a global index to the entire `Tensor`, but the index in the last\n dimension.\n\n Raises:\n ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.\n If the total size of values exceeds `2^31 - 1` elements.\n If the first `N-1` dimensions of the two tensors don't match.\n \"\"\"\n sequence_size = shape_internal(sorted_sequence)[-1]\n values_size = shape_internal(values)[-1]\n sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])\n values_2d = reshape(values, [-1, values_size])\n if side == \"right\":\n output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,\n name)\n elif side == \"left\":\n output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,\n name)\n else:\n raise ValueError(\"side must be either 'right' or 'left'. Saw: %s.\" % side)\n return reshape(output, shape_internal(values))\n\n\nquantize.__doc__ = gen_array_ops.quantize_v2.__doc__\n\n\n@tf_export(\"image.extract_patches\")\[email protected]_dispatch_support\ndef extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):\n r\"\"\"Extract `patches` from `images`.\n\n This op collects patches from the input image, as if applying a\n convolution. All extracted patches are stacked in the depth (last) dimension\n of the output.\n\n Specifically, the op extracts patches of shape `sizes` which are `strides`\n apart in the input image. 
The output is subsampled using the `rates` argument,\n in the same manner as \"atrous\" or \"dilated\" convolutions.\n\n The result is a 4D tensor which is indexed by batch, row, and column.\n `output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`\n which is taken from the input starting at\n `images[i, x*strides[1], y*strides[2]]`.\n\n Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where\n `depth` is `images.shape[3]`.\n\n The output elements are taken from the input at intervals given by the `rate`\n argument, as in dilated convolutions.\n\n The `padding` argument has no effect on the size of each patch, it determines\n how many patches are extracted. If `VALID`, only patches which are fully\n contained in the input image are included. If `SAME`, all patches whose\n starting point is inside the input are included, and areas outside the input\n default to zero.\n\n Example:\n\n ```\n n = 10\n # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100\n images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]\n\n # We generate two outputs as follows:\n # 1. 3x3 patches with stride length 5\n # 2. Same as above, but the rate is increased to 2\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 1, 1, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 2 3 11 12 13 21 22 23]\n [ 6 7 8 16 17 18 26 27 28]]\n [[51 52 53 61 62 63 71 72 73]\n [56 57 58 66 67 68 76 77 78]]]]\n ```\n\n If we mark the pixels in the input image which are taken for the output with\n `*`, we see the pattern:\n\n ```\n * * * 4 5 * * * 9 10\n * * * 14 15 * * * 19 20\n * * * 24 25 * * * 29 30\n 31 32 33 34 35 36 37 38 39 40\n 41 42 43 44 45 46 47 48 49 50\n * * * 54 55 * * * 59 60\n * * * 64 65 * * * 69 70\n * * * 74 75 * * * 79 80\n 81 82 83 84 85 86 87 88 89 90\n 91 92 93 94 95 96 97 98 99 100\n ```\n\n ```\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 2, 2, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 3 5 21 23 25 41 43 45]\n [ 6 8 10 26 28 30 46 48 50]]\n\n [[ 51 53 55 71 73 75 91 93 95]\n [ 56 58 60 76 78 80 96 98 100]]]]\n ```\n\n We can again draw the effect, this time using the symbols `*`, `x`, `+` and\n `o` to distinguish the patches:\n\n ```\n * 2 * 4 * x 7 x 9 x\n 11 12 13 14 15 16 17 18 19 20\n * 22 * 24 * x 27 x 29 x\n 31 32 33 34 35 36 37 38 39 40\n * 42 * 44 * x 47 x 49 x\n + 52 + 54 + o 57 o 59 o\n 61 62 63 64 65 66 67 68 69 70\n + 72 + 74 + o 77 o 79 o\n 81 82 83 84 85 86 87 88 89 90\n + 92 + 94 + o 97 o 99 o\n ```\n\n Args:\n images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]\n sizes: The size of the extracted patches. Must be [1, size_rows, size_cols,\n 1].\n strides: A 1-D Tensor of length 4. How far the centers of two consecutive\n patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.\n rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.\n This is the input stride, specifying how far two consecutive patch samples\n are in the input. Equivalent to extracting patches with `patch_sizes_eff =\n patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling\n them spatially by a factor of `rates`. This is equivalent to `rate` in\n dilated (a.k.a. 
Atrous) convolutions.\n padding: The type of padding algorithm to use.\n name: A name for the operation (optional).\n\n Returns:\n A 4-D Tensor of the same type as the input.\n \"\"\"\n return gen_array_ops.extract_image_patches(images, sizes, strides, rates,\n padding, name)\n\n\n@tf_export(v1=[\"image.extract_image_patches\", \"extract_image_patches\"])\[email protected]_dispatch_support\[email protected]_args(None, \"ksizes is deprecated, use sizes instead\",\n \"ksizes\")\ndef extract_image_patches( # pylint: disable=missing-docstring\n images,\n ksizes=None,\n strides=None,\n rates=None,\n padding=None,\n name=None,\n sizes=None):\n \"\"\"Extract patches from images and put them in the \"depth\" output dimension.\n\n Args:\n images: A `Tensor`. Must be one of the following types: `float32`,\n `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,\n `uint16`, `half`, `uint32`, `uint64`. 4-D Tensor with shape\n `[batch, in_rows, in_cols, depth]`.\n ksizes: A list of `ints` that has length `>= 4`. The size of the sliding\n window for each dimension of `images`. Must be:\n `[1, ksize_rows, ksize_cols, 1]`.\n strides: A list of `ints` that has length `>= 4`. 1-D of length 4. How far\n the centers of two consecutive patches are in the images. Must be:\n `[1, stride_rows, stride_cols, 1]`.\n rates: A list of `ints` that has length `>= 4`. 1-D of length 4. Must be:\n `[1, rate_rows, rate_cols, 1]`. This is the input stride, specifying how\n far two consecutive patch samples are in the input. Equivalent to\n extracting patches with\n `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`,\n followed by subsampling them spatially by a factor of `rates`. This is\n equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.\n padding: A `string` from: \"SAME\", \"VALID\". The type of padding algorithm\n to use.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as images.\n \"\"\"\n ksizes = deprecation.deprecated_argument_lookup(\"sizes\", sizes, \"ksizes\",\n ksizes)\n return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,\n padding, name)\n\n\nextract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__\n\n\n@tf_export(\"fingerprint\")\[email protected]_dispatch_support\ndef fingerprint(data, method=\"farmhash64\", name=None):\n r\"\"\"Generates fingerprint values.\n\n Generates fingerprint values of `data`.\n\n Fingerprint op considers the first dimension of `data` as the batch dimension,\n and `output[i]` contains the fingerprint value generated from contents in\n `data[i, ...]` for all `i`.\n\n Fingerprint op writes fingerprint values as byte arrays. For example, the\n default method `farmhash64` generates a 64-bit fingerprint value at a time.\n This 8-byte value is written out as a `tf.uint8` array of size 8, in\n little-endian order.\n\n For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),\n and that the fingerprint method is `farmhash64`. In this case, the output\n shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the\n size of each fingerprint value in bytes. 
`output[0, :]` is generated from\n 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from\n other 12 integers in `data[1, :, :]`.\n\n Note that this op fingerprints the raw underlying buffer, and it does not\n fingerprint Tensor's metadata such as data type and/or shape. For example, the\n fingerprint values are invariant under reshapes and bitcasts as long as the\n batch dimension remain the same:\n\n ```python\n tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))\n tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))\n ```\n\n For string data, one should expect `tf.fingerprint(data) !=\n tf.fingerprint(tf.string.reduce_join(data))` in general.\n\n Args:\n data: A `Tensor`. Must have rank 1 or higher.\n method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.\n Currently available method is `farmhash64`.\n name: A name for the operation (optional).\n\n Returns:\n A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to\n `data`'s first dimension, and the second dimension size depends on the\n fingerprint algorithm.\n \"\"\"\n return gen_array_ops.fingerprint(data, method, name)\n\n\ndef convert_to_int_tensor(tensor, name, dtype=dtypes.int32):\n \"\"\"Converts the given value to an integer Tensor.\"\"\"\n tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)\n if tensor.dtype.is_integer:\n tensor = gen_math_ops.cast(tensor, dtype)\n else:\n raise TypeError(\"%s must be an integer tensor; dtype=%s\" %\n (name, tensor.dtype))\n return tensor\n\n\ndef get_positive_axis(axis, ndims, axis_name=\"axis\", ndims_name=\"ndims\"):\n \"\"\"Validate an `axis` parameter, and normalize it to be positive.\n\n If `ndims` is known (i.e., not `None`), then check that `axis` is in the\n range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or\n `axis + ndims` (otherwise).\n If `ndims` is not known, and `axis` is positive, then return it as-is.\n If `ndims` is not known, and `axis` is negative, then report an error.\n\n Args:\n axis: An integer constant\n ndims: An integer constant, or `None`\n axis_name: The name of `axis` (for error messages).\n ndims_name: The name of `ndims` (for error messages).\n\n Returns:\n The normalized `axis` value.\n\n Raises:\n ValueError: If `axis` is out-of-bounds, or if `axis` is negative and\n `ndims is None`.\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"%s must be an int; got %s\" %\n (axis_name, type(axis).__name__))\n if ndims is not None:\n if 0 <= axis < ndims:\n return axis\n elif -ndims <= axis < 0:\n return axis + ndims\n else:\n raise ValueError(\"%s=%s out of bounds: expected %s<=%s<%s\" %\n (axis_name, axis, -ndims, axis_name, ndims))\n elif axis < 0:\n raise ValueError(\"%s may only be negative if %s is statically known.\" %\n (axis_name, ndims_name))\n return axis\n\n\n# This op is intended to exactly match the semantics of numpy.repeat, with\n# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior\n# when axis is not specified. Rather than implement that special behavior, we\n# simply make `axis` be a required argument.\n#\n# External (OSS) `tf.repeat` feature request:\n# https://github.com/tensorflow/tensorflow/issues/8246\ndef repeat_with_axis(data, repeats, axis, name=None):\n \"\"\"Repeats elements of `data`.\n\n Args:\n data: An `N`-dimensional tensor.\n repeats: A 1-D integer tensor specifying how many times each element in\n `axis` should be repeated. 
`len(repeats)` must equal `data.shape[axis]`.\n Supports broadcasting from a scalar value.\n axis: `int`. The axis along which to repeat values. Must be less than\n `max(N, 1)`.\n name: A name for the operation.\n\n Returns:\n A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,\n except that dimension `axis` has size `sum(repeats)`.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n <tf.Tensor: shape=(5,), dtype=string,\n numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n <tf.Tensor: shape=(5, 2), dtype=int32, numpy=\n array([[1, 2],\n [1, 2],\n [3, 4],\n [3, 4],\n [3, 4]], dtype=int32)>\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n <tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n array([[1, 1, 2, 2, 2],\n [3, 3, 4, 4, 4]], dtype=int32)>\n\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an int; got %s\" % type(axis).__name__)\n\n with ops.name_scope(name, \"Repeat\", [data, repeats]):\n data = ops.convert_to_tensor(data, name=\"data\")\n repeats = convert_to_int_tensor(repeats, name=\"repeats\")\n repeats.shape.with_rank_at_most(1)\n\n # If `data` is a scalar, then upgrade it to a vector.\n data = _with_nonzero_rank(data)\n data_shape = shape(data)\n\n # If `axis` is negative, then convert it to a positive value.\n axis = get_positive_axis(axis, data.shape.rank, ndims_name=\"rank(data)\")\n\n # If we know that `repeats` is a scalar, then we can just tile & reshape.\n if repeats.shape.num_elements() == 1:\n repeats = reshape(repeats, [])\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, repeats)\n result_shape = concat([\n data_shape[:axis], [repeats * data_shape[axis]], data_shape[axis + 1:]\n ],\n axis=0)\n return reshape(tiled, result_shape)\n\n\n # Check data Tensor shapes.\n if repeats.shape.ndims == 1:\n data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])\n\n repeats = broadcast_to(repeats, [data_shape[axis]])\n repeats_original = repeats\n\n # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.\n if repeats.shape.ndims != axis + 1:\n repeats_shape = shape(repeats)\n repeats_ndims = rank(repeats)\n broadcast_shape = concat(\n [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)\n repeats = broadcast_to(repeats, broadcast_shape)\n repeats.set_shape([None] * (axis + 1))\n\n # Create a \"sequence mask\" based on `repeats`, where slices across `axis`\n # contain one `True` value for each repetition. E.g., if\n # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.\n max_repeat = gen_math_ops.maximum(\n 0, gen_math_ops._max(repeats, _all_dimensions(repeats)))\n mask = sequence_mask(repeats, max_repeat)\n\n # Add a new dimension around each value that needs to be repeated, and\n # then tile that new dimension to match the maximum number of repetitions.\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, max_repeat)\n\n # Use `boolean_mask` to discard the extra repeated values. 
This also\n # flattens all dimensions up through `axis`.\n masked = boolean_mask(tiled, mask)\n\n # Reshape the output tensor to add the outer dimensions back.\n if axis == 0:\n result = masked\n else:\n repeated_dim_size = gen_math_ops._sum(\n repeats_original,\n axis=gen_math_ops._range(0, rank(repeats_original), 1))\n result_shape = concat(\n [data_shape[:axis], [repeated_dim_size], data_shape[axis + 1:]],\n axis=0)\n result = reshape(masked, result_shape)\n\n # Preserve shape information.\n if data.shape.ndims is not None:\n new_axis_size = 0 if repeats.shape[0] == 0 else None\n result.set_shape(data.shape[:axis].concatenate(\n [new_axis_size]).concatenate(data.shape[axis + 1:]))\n\n return result\n\n\ndef tile_one_dimension(data, axis, multiple):\n \"\"\"Tiles a single dimension of a tensor.\"\"\"\n # Assumes axis is a nonnegative int.\n if data.shape.ndims is not None:\n multiples = [1] * data.shape.ndims\n multiples[axis] = multiple\n else:\n ones_value = ones(rank(data), dtypes.int32)\n multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]],\n axis=0)\n return tile(data, multiples)\n\n\ndef _with_nonzero_rank(data):\n \"\"\"If `data` is scalar, then add a dimension; otherwise return as-is.\"\"\"\n if data.shape.ndims is not None:\n if data.shape.ndims == 0:\n return stack([data])\n else:\n return data\n else:\n data_shape = shape(data)\n data_ndims = rank(data)\n return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])\n\n\n@tf_export(\"repeat\")\[email protected]_dispatch_support\ndef repeat(input, repeats, axis=None, name=None): # pylint: disable=redefined-builtin\n \"\"\"Repeat elements of `input`.\n \n See also `tf.concat`, `tf.stack`, `tf.tile`.\n\n Args:\n input: An `N`-dimensional Tensor.\n repeats: An 1-D `int` Tensor. The number of repetitions for each element.\n repeats is broadcasted to fit the shape of the given axis. `len(repeats)`\n must equal `input.shape[axis]` if axis is not None.\n axis: An int. The axis along which to repeat values. By default (axis=None),\n use the flattened input array, and return a flat output array.\n name: A name for the operation.\n\n Returns:\n A Tensor which has the same shape as `input`, except along the given axis.\n If axis is None then the output array is flattened to match the flattened\n input array.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n <tf.Tensor: shape=(5,), dtype=string,\n numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n <tf.Tensor: shape=(5, 2), dtype=int32, numpy=\n array([[1, 2],\n [1, 2],\n [3, 4],\n [3, 4],\n [3, 4]], dtype=int32)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n <tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n array([[1, 1, 2, 2, 2],\n [3, 3, 4, 4, 4]], dtype=int32)>\n\n >>> repeat(3, repeats=4)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>\n\n >>> repeat([[1,2], [3,4]], repeats=2)\n <tf.Tensor: shape=(8,), dtype=int32,\n numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>\n\n \"\"\"\n if axis is None:\n input = reshape(input, [-1])\n axis = 0\n return repeat_with_axis(input, repeats, axis, name)\n" ]
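The repeat_with_axis implementation above avoids a per-element loop: it tiles every element max(repeats) times and then drops the surplus copies with a boolean sequence mask. Below is a minimal NumPy re-creation of that trick for the 1-D case; the function name and the NumPy stand-ins are illustrative sketches, not part of the TensorFlow source.

import numpy as np

def repeat_via_mask(data, repeats):
    data, repeats = np.asarray(data), np.asarray(repeats)
    max_rep = repeats.max()
    # Tile every element max_rep times along a new axis...
    tiled = np.tile(data[:, None], (1, max_rep))
    # ...then keep only the first repeats[i] copies of element i.
    mask = np.arange(max_rep)[None, :] < repeats[:, None]
    return tiled[mask]

print(repeat_via_mask(["a", "b", "c"], [3, 0, 2]))  # ['a' 'a' 'a' 'c' 'c']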
[ [ "tensorflow.python.ops.gen_math_ops.select_v2", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.ops.gen_array_ops.list_diff", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.gen_array_ops.strided_slice", "tensorflow.python.ops.gen_array_ops.dequantize", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_array_ops.rank", "tensorflow.python.ops.gen_array_ops.identity", "tensorflow.python.ops.gen_array_ops.matrix_diag_v3", "tensorflow.python.ops.gen_array_ops.quantize_and_dequantize_v2", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.gen_array_ops.reshape", "tensorflow.python.ops.gen_array_ops.split", "tensorflow.python.ops.gen_array_ops.unique_with_counts", "tensorflow.python.ops.gen_array_ops.placeholder", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.ops.gen_array_ops.extract_image_patches", "numpy.array", "tensorflow.python.ops.gen_array_ops.pack", "tensorflow.python.ops.gen_array_ops.edit_distance", "tensorflow.python.ops.gen_array_ops.size", "tensorflow.python.util.nest.flatten", "tensorflow.python.ops.gen_math_ops._range", "tensorflow.python.ops.gen_array_ops.unique", "tensorflow.python.ops.gen_array_ops.squeeze", "tensorflow.python.ops.gen_array_ops.quantize_v2", "tensorflow.python.ops.gen_array_ops.space_to_depth", "tensorflow.python.ops.gen_array_ops.where", "tensorflow.python.ops.gen_array_ops.concat_v2", "tensorflow.python.ops.gen_array_ops.one_hot", "tensorflow.python.framework.common_shapes.broadcast_shape", "tensorflow.python.ops.gen_array_ops.pad", "tensorflow.python.framework.tensor_util.maybe_set_static_shape", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_array_ops.matrix_set_diag_v3", "tensorflow.python.framework.ops.Tensor._override_operator", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.tensor_shape.unknown_shape", "numpy.isscalar", "tensorflow.python.ops.gen_array_ops.fill", "tensorflow.python.ops.gen_array_ops.matrix_diag_part_v3", "tensorflow.python.ops.gen_array_ops.reverse_sequence", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.ops.gen_array_ops.split_v", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.gen_array_ops.depth_to_space", "tensorflow.python.ops.gen_array_ops.lower_bound", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.python.ops.gen_array_ops.unpack", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.ops.gen_array_ops.gather_nd", "tensorflow.python.framework.tensor_shape.is_fully_defined", "tensorflow.python.ops.gen_math_ops.cast", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_array_ops.shape", "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.gen_math_ops.select", "tensorflow.python.ops.gen_array_ops.gather_v2", "tensorflow.python.ops.gen_math_ops.prod", "tensorflow.python.ops.gen_array_ops.fingerprint", "tensorflow.python.ops.gen_array_ops.placeholder_with_default", "tensorflow.python.ops.gen_array_ops.upper_bound", "numpy.arange", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.gen_array_ops.broadcast_args", 
"tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.gen_array_ops.pad_v2", "tensorflow.python.ops.gen_array_ops.zeros_like", "tensorflow.python.ops.gen_array_ops.shape_n", "tensorflow.python.ops.gen_array_ops._slice", "tensorflow.python.ops.gen_array_ops.mirror_pad", "tensorflow.python.ops.gen_array_ops.expand_dims", "numpy.prod", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.constant_op.constant" ] ]
shaominghe/stargan_adience
[ "7b59cae38acd0f32bf63695280b833ba7366e804" ]
[ "solver_classification.py" ]
[ "from model import Generator\nfrom model import Discriminator\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom model_agecomparison import Classificationmodel, getCossloss, getKLloss\nimport os\nimport time\nimport datetime\nfrom torch import nn\n\n\nclass Solver(object):\n \"\"\"Solver for training and testing StarGAN.\"\"\"\n\n def __init__(self, classification_loader,test_loader, config):\n \"\"\"Initialize configurations.\"\"\"\n\n # Data loader.\n self.classification_loader = classification_loader\n self.test_loader=test_loader\n\n\n\n self.image_size = config.image_size\n\n\n # Training configurations.\n self.dataset = config.dataset\n self.batch_size = config.batch_size\n self.num_iters = config.num_iters\n self.num_iters_decay = config.num_iters_decay\n self.classification_lr = config.classification_lr\n\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n\n self.resume_iters = config.resume_iters\n\n # Test configurations.\n self.test_iters = config.test_iters\n\n # Miscellaneous.\n self.use_tensorboard = config.use_tensorboard\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Directories.\n self.log_dir = config.log_dir\n self.sample_dir = config.sample_dir\n self.model_save_dir = config.model_save_dir\n self.result_dir = config.result_dir\n\n # Step size.\n self.log_step = config.log_step\n self.sample_step = config.sample_step\n self.model_save_step = config.model_save_step\n self.lr_update_step = config.lr_update_step\n self.vgg_type=config.vgg_type\n\n # Build the model and tensorboard.\n self.build_model()\n if self.use_tensorboard:\n self.build_tensorboard()\n\n def build_model(self):\n \"\"\"Create a generator and a discriminator.\"\"\"\n self.classification_modle=Classificationmodel(self.vgg_type)\n\n\n self.classification_optimizer = torch.optim.Adam(self.classification_modle.parameters(), self.classification_lr, [self.beta1, self.beta2])\n self.print_network(self.classification_modle, 'G')\n\n self.classification_modle.to(self.device)\n\n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"Total number of parameters : %.3f M' \" % (num_params / 1e6))\n\n def restore_model(self, resume_iters):\n \"\"\"Restore the trained generator and discriminator.\"\"\"\n print('Loading the trained models from step {}...'.format(resume_iters))\n classfication_path = os.path.join(self.model_save_dir, '{}-classfication.ckpt'.format(resume_iters))\n self.classification_modle.load_state_dict(torch.load(classfication_path, map_location=lambda storage, loc: storage))\n\n def build_tensorboard(self):\n \"\"\"Build a tensorboard logger.\"\"\"\n from logger import Logger\n self.logger = Logger(self.log_dir)\n\n def update_lr(self, g_lr):\n \"\"\"Decay learning rates of the generator and discriminator.\"\"\"\n for param_group in self.classification_modle.param_groups:\n param_group['lr'] = g_lr\n\n\n def reset_grad(self):\n \"\"\"Reset the gradient buffers.\"\"\"\n self.classification_optimizer.zero_grad()\n\n def denorm(self, x):\n \"\"\"Convert the range from [-1, 1] to [0, 1].\"\"\"\n out = (x + 1) / 2\n return out.clamp_(0, 1)\n\n def gradient_penalty(self, y, x):\n \"\"\"Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.\"\"\"\n weight = torch.ones(y.size()).to(self.device)\n dydx = 
torch.autograd.grad(outputs=y,\n inputs=x,\n grad_outputs=weight,\n retain_graph=True,\n create_graph=True,\n only_inputs=True)[0]\n\n dydx = dydx.view(dydx.size(0), -1)\n dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))\n return torch.mean((dydx_l2norm - 1) ** 2)\n\n def label2onehot(self, labels, dim):\n \"\"\"Convert label indices to one-hot vectors.\"\"\"\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out\n\n def createage_labels(self, c_org, c_dim=5, selected_attrs=None):\n\n age_color_indices = []\n for i, attr_name in enumerate(selected_attrs):\n if attr_name in [\"(4,\", \"(25,\", \"(0,\", \"(8,\", \"(15,\", \"(38,\", \"(48,\", \"(60,\"]:\n age_color_indices.append(i)\n c_trg_list = []\n for i in range(c_dim):\n\n c_trg = c_org.clone()\n if i in age_color_indices: # Set one hair color to 1 and the rest to 0.\n c_trg[:, i] = 1\n for j in age_color_indices:\n if j != i:\n c_trg[:, j] = 0\n else:\n c_trg[:, i] = (c_trg[:, i] == 0) # Reverse attribute value.\n\n c_trg_list.append(c_trg.to(self.device))\n return c_trg_list\n\n def create_labels(self, c_org, c_dim=5, dataset='CelebA', selected_attrs=None):\n \"\"\"Generate target domain labels for debugging and testing.\"\"\"\n # Get hair color indices.\n if dataset == 'CelebA':\n hair_color_indices = []\n for i, attr_name in enumerate(selected_attrs):\n if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']:\n hair_color_indices.append(i)\n\n c_trg_list = []\n for i in range(c_dim):\n if dataset == 'CelebA':\n c_trg = c_org.clone()\n if i in hair_color_indices: # Set one hair color to 1 and the rest to 0.\n c_trg[:, i] = 1\n for j in hair_color_indices:\n if j != i:\n c_trg[:, j] = 0\n else:\n c_trg[:, i] = (c_trg[:, i] == 0) # Reverse attribute value.\n elif dataset == 'RaFD':\n c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, c_dim)\n\n c_trg_list.append(c_trg.to(self.device))\n return c_trg_list\n\n def classification_loss(self, logit, target, dataset='CelebA'):\n \"\"\"Compute binary or softmax cross entropy loss.\"\"\"\n if dataset == 'CelebA' or dataset == 'adience':\n return F.cross_entropy(logit, target, size_average=False) / logit.size(0)\n elif dataset == 'RaFD':\n return F.cross_entropy(logit, target)\n\n def train(self):\n \"\"\"Train StarGAN within a single dataset.\"\"\"\n # Set data loader.\n # if self.dataset == 'CelebA':\n # data_loader = self.celeba_loader\n # elif self.dataset == 'RaFD':\n # data_loader = self.rafd_loader\n # elif self.dataset == 'adience':\n # data_loader = self.adience_loader\n data_loader=self.classification_loader\n test_loader=self.test_loader\n self.log_name = os.path.join(self.log_dir, 'loss_log.txt')\n # self.transform(image), torch.FloatTensor(one), torch.FloatTensor(cost_one), torch.FloatTensor(y_sig01)\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n # Fetch fixed inputs for debugging.\n\n\n # Learning rate cache for decaying.\n classification_lr = self.classification_lr\n\n # Start training from scratch or resume training.\n start_iters = 0\n if self.resume_iters:\n start_iters = self.resume_iters\n self.restore_model(self.resume_iters)\n\n # Start training.\n print('Start training...')\n start_time = time.time()\n for i in range(start_iters, self.num_iters):\n\n # =================================================================================== #\n # 1. 
Preprocess input data #\n # =================================================================================== #\n\n # Fetch real images and labels.\n try:\n image, one,cost_one,y_sig01 = next(data_iter)\n except:\n data_iter = iter(data_loader)\n image, one,cost_one,y_sig01= next(data_iter)\n\n\n\n # =================================================================================== #\n # 2. Train the discriminator #\n # =================================================================================== #\n\n # Compute loss with real images.\n class_fc1, class_fc2=self.classification_modle(image)\n\n Cossloss=getCossloss(class_fc1, one, cost_one)\n KLloss=getKLloss(class_fc2, y_sig01)\n\n outloss=Cossloss+KLloss\n self.reset_grad()\n outloss.backward()\n self.classification_optimizer.step()\n\n\n # Logging.\n loss = {}\n loss['classfication/Cossloss'] = Cossloss.item()\n loss['classfication/KLloss'] = KLloss.item()\n\n\n # =================================================================================== #\n # 3. Train the generator #\n # =================================================================================== #\n\n # =================================================================================== #\n # 4. Miscellaneous #\n # =================================================================================== #\n\n # Print out training information.\n if (i + 1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log_time = time.strftime(\"[%d/%m/%Y %H:%M:%S]\")\n\n log = \"{}, Elapsed [{}], Iteration [{}/{}]\".format(log_time, et, i + 1, self.num_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % log) # save the message\n\n if self.use_tensorboard:\n for tag, value in loss.items():\n self.logger.scalar_summary(tag, value, i + 1)\n\n # # Translate fixed images for debugging.\n # if (i + 1) % self.sample_step == 0:\n # with torch.no_grad():\n # x_fake_list = [x_fixed]\n # for c_fixed in c_fixed_list:\n # x_fake_list.append(self.G(x_fixed, c_fixed))\n # x_concat = torch.cat(x_fake_list, dim=3)\n # sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i + 1))\n # save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)\n # print('Saved real and fake images into {}...'.format(sample_path))\n\n # Save model checkpoints.\n if (i + 1) % self.model_save_step == 0:\n classification_path = os.path.join(self.model_save_dir, '{}-classification.ckpt'.format(i + 1))\n torch.save(self.classification_modle.state_dict(), classification_path)\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\n\n # Decay learning rates.\n if (i + 1) % self.lr_update_step == 0 and (i + 1) > (self.num_iters - self.num_iters_decay):\n classification_lr -= (self.classification_lr / float(self.num_iters_decay))\n\n self.update_lr(classification_lr)\n lr_str = 'Decayed learning rates, classification_lr: {}.'.format(classification_lr)\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % lr_str) # save the message\n\n if (i + 1) % self.sample_step == 0:\n CA3_sum=0\n CA5_sum=0\n for i_train_batch, train_batch in enumerate(test_loader):\n image, one, cost_one, y_sig01=train_batch\n class_fc1, class_fc2 = self.classification_modle(image)\n result_index=torch.argmax(class_fc2,dim=-1)\n CA3=torch.abs(result_index-y_sig01)<=3\n CA5=torch.abs(result_index-y_sig01)<=5\n 
CA3_sum+=torch.sum(CA3)\n CA5_sum+=torch.sum(CA5)\n # torch.float is a dtype, not a callable: cast with float() instead, and\n # normalize by the number of test samples rather than the number of batches.\n CA3_precision=CA3_sum.item()/float(len(test_loader.dataset))\n CA5_precision=CA5_sum.item()/float(len(test_loader.dataset))\n print('CA3: {:.4f}, CA5: {:.4f}'.format(CA3_precision, CA5_precision))\n\n\n\n" ]
[ [ "torch.mean", "torch.abs", "torch.load", "torch.zeros", "numpy.arange", "torch.nn.functional.cross_entropy", "torch.sum", "torch.cuda.is_available", "torch.autograd.grad", "torch.argmax" ] ]
prerakgarg07/cloudyfsps
[ "4a6a185343ed1e09b9f201a465c37e377ef42101" ]
[ "demos/test_hden.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, print_function, absolute_import,\n unicode_literals)\nimport os\nimport sys\nimport numpy as np\nimport fsps\nfrom past.utils import old_div\nfrom cloudyfsps.ASCIItools import (writeASCII, compileASCII, checkCompiled, compiledExists)\nfrom cloudyfsps.cloudyInputTools import *\nfrom cloudyfsps.generalTools import calcForLogQ\nfrom cloudyfsps.cloudyOutputTools import *\nfrom cloudyfsps.outputFormatting import *\n#import ipdb\n#runMake, formatAllOutput, writeFormattedOutput)\n\n# this code snippet goes through every step needed\n# to integrate FSPS into Cloudy.\n# This example uses stellar pops with a constant SFH\n# as the input ionizing source.\n# 1. Write an ascii file in Cloudy format with grid\n# of FSPS spectra in all available ages and\n# metallicities\n# 2. Compile asii file into binary format required\n# for Cloudy use. Assumes $CLOUDY_EXE is set to\n# your /path/to/cloudy.exe\n# 3. Writes Cloudy input files for a subset of grid\n# parameters.\n# 4. Runs Cloudy on the *.in files\n# 5. Formats the various output files\n\nzsun = 0.019\nexec_write_ascii = False\nexec_write_input = False\nexec_run_cloudy = False\nexec_write_output = True\nexec_gen_FSPS_grid = True\n\n# Function to write the ascii file.\n# This is where you set the properties of the\n# ionizing spectrum (SSP/CSFH, IMF, FBHB, etc)\n\ndef hden_ascii(fileout, **kwargs):\n # change these parameters to modify the ionizing source grid\n # default mode is to produce an ascii grid in age and Z,\n # though different variables and more dimensions are possible.\n sp_dict = dict(zcontinuous=1,\n imf_type=2,\n sfh=0,\n const=0.0,\n sf_start=0.0)\n sp = fsps.StellarPopulation(**sp_dict)\n # all ages and Zs\n ages = 10.**sp.log_age\n logZs = np.log10(old_div(sp.zlegend,zsun))\n print (ages,logZs)\n modpars = [(age, logZ) for age in ages for logZ in logZs]\n lam = sp.wavelengths\n all_fluxs = []\n for logZ in logZs:\n sp.params['logzsol'] = logZ\n all_fluxs.append(sp.get_spectrum()[1]) #lsun per hz\n nmod = len(modpars)\n # flatten flux for writing\n flat_flux = np.array([all_fluxs[j][i]\n for i in range(len(ages))\n for j in range(len(logZs))])\n # this function is flexible, ndim can be 3/4/n.\n # in this example, however, ndim is 2 (age, logz).\n writeASCII(fileout, lam, flat_flux, modpars,\n nx=len(lam), ndim=2, npar=2, nmod=nmod)\n return\n#---------------------------------------------------------------------\n# ASCII FILE: WRITE AND COMPILE\n#---------------------------------------------------------------------\n# assumes you have $CLOUDY_EXE and $CLOUDY_DATA_PATH set as sys vars.\n\n# name of ascii file\nascii_file = \"FSPS_PDVA_test.ascii\"\n\n# or if there is an already-compiled one you want to use, specify here\ncompiled_ascii = \"{}.mod\".format(ascii_file.split(\".\")[0])\n\nif exec_write_ascii:\n print(\"Executing write ascii sequence...\")\n if not compiledExists(ascii_file):\n print(\"No compiled model exists...Writing.\")\n hden_ascii(ascii_file)\n print(\"Compiling {} with Cloudy\".format(ascii_file))\n compileASCII(ascii_file)\n print(\"Checking to see if compilation was successful...\")\n if checkCompiled(ascii_file):\n print(\"Your model {} is ready to run.\".format(compiled_ascii))\n else:\n sys.exit()\n else:\n print(\"{} already exists.\".format(compiled_ascii))\n\n#---------------------------------------------------------------------\n# WRITE CLOUDY INPUT\n#---------------------------------------------------------------------\n# 
local folder to read and write *.in, *.out files\nmod_dir = '/home/prerak/codes/hden_test/redo/c17_pagb_test/'\n#mod_dir = \"/home/prerakgarg/redo/c17/\"\nmod_prefix = 'ZAU'\n\n# GRID PARAMETERS FOR CLOUDY RUN\n#--------------\n# ages between 1 and 7 Myr\n#ages = np.linspace(1., 7., 7)*1.e6i\n\n'''\nages = np.array([0.5, 1, 2, 3, 4, 5, 6, 7, 10, 20])*1.e6\n# stellar metallicities\nlogZs = np.array([-1.98, -1.5, -1.0, -0.6, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.198])\n# ionization parameters between -4 and -1\nlogUs = np.array([-4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0])\n'''\n\nages = np.array([1, 2,])*1.e6\n# stellar metallicities\nlogZs = np.array([-1.5])\n# ionization parameters between -4 and -1\nlogUs = np.array([-4.0])\n#logUs = [-1.]\n# Hydrogen density between 30 and 400\n#nhs = np.arange(50., 450., 200) # density of gas, cm-3\nnhs = [100]\n\n# Other default parameters based off of Byler+2017\nRinners = np.array([19.]) # inner radius of HII region, 3pc\nefrac = -1.0 # calculation is stopped when H is 10^efrac % neutral\nset_name='dopita' # abundances from Dopita+2001\ndust=False # don't include dust in nebula\nextra_output=False # include lots of outputs\n#-----------------------------------------------------------------\n\n# iterate through all of the above parameters\n# calcForLogQ just calculates Q = U*4*pi*Ri^2*nH\n\npars = np.array([(Z, a, U, R, calcForLogQ(logU=U, Rinner=10.0**R, nh=n), n, efrac)\n for Z in logZs\n for a in ages\n for U in logUs\n for R in Rinners\n for n in nhs])\n\nif exec_write_input:\n print('Writing input files...')\n writeParamFiles(dir_=mod_dir,\n model_prefix=mod_prefix,\n cloudy_mod=compiled_ascii,\n run_cloudy=False, # don't run yet\n ages=ages,\n logZs=logZs,\n logUs=logUs,\n r_inners=Rinners,\n nhs=nhs,\n use_Q=True,\n # if False, will use logU;\n # does not matter in this case,\n # since Q is calculated at\n # each specified logU.\n verbose=False, # don't print output to screen\n set_name=set_name,\n dust=dust,\n extra_output=extra_output)\n print('Wrote {} param files'.format(len(pars)))\nelse:\n print('Skipping input writing.')\n\n\n#---------------------------------------------------------------------\n# RUN CLOUDY ON ALL INPUT FILES\n#---------------------------------------------------------------------\nif exec_run_cloudy:\n print(\"Running Cloudy....\")\n runMake(dir_=mod_dir, n_proc=4, model_name=mod_prefix)\n print(\"Cloudy finished.\")\nelse:\n print(\"Not running Cloudy. Skipping to formatting output.\")\n\n\n#---------------------------------------------------------------------\n# FORMAT OUTPUT\n#---------------------------------------------------------------------\nif exec_write_output:\n print(\"Formatting output files...\\n\")\n formatAllOutput(mod_dir, mod_prefix, write_line_lum=False)\nelse:\n print(\"\\n\\nNot formatting output. DONE.\")\n\nif exec_gen_FSPS_grid:\n print(\"Creating FSPS input grids...\")\n writeFormattedOutput(mod_dir, mod_prefix, \"\")\nelse:\n print(\"\\n\\nNot formatting FSPS output. DONE.\")\n" ]
[ [ "numpy.array" ] ]
mengwa41/Ax
[ "fe20381214fd287a2088b0ccdd8c67337aaccf22" ]
[ "ax/service/tests/test_ax_client.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nimport sys\nimport time\nfrom math import ceil\nfrom typing import List, Tuple\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom ax.core.arm import Arm\nfrom ax.core.base_trial import TrialStatus\nfrom ax.core.generator_run import GeneratorRun\nfrom ax.core.metric import Metric\nfrom ax.core.outcome_constraint import OutcomeConstraint\nfrom ax.core.parameter import (\n ChoiceParameter,\n FixedParameter,\n ParameterType,\n RangeParameter,\n)\nfrom ax.core.types import ComparisonOp\nfrom ax.exceptions.core import DataRequiredError\nfrom ax.metrics.branin import branin\nfrom ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy\nfrom ax.modelbridge.registry import MODEL_KEY_TO_MODEL_SETUP, Models\nfrom ax.service.ax_client import AxClient\nfrom ax.storage.sqa_store.db import init_test_engine_and_session_factory\nfrom ax.storage.sqa_store.decoder import Decoder\nfrom ax.storage.sqa_store.encoder import Encoder\nfrom ax.storage.sqa_store.sqa_config import SQAConfig\nfrom ax.storage.sqa_store.structs import DBSettings\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.common.timeutils import current_timestamp_in_millis\nfrom ax.utils.common.typeutils import checked_cast, not_none\nfrom ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n\n\ndef run_trials_using_recommended_parallelism(\n ax_client: AxClient,\n recommended_parallelism: List[Tuple[int, int]],\n total_trials: int,\n) -> int:\n remaining_trials = total_trials\n for num_trials, parallelism_setting in recommended_parallelism:\n if num_trials == -1:\n num_trials = remaining_trials\n for _ in range(ceil(num_trials / parallelism_setting)):\n in_flight_trials = []\n if parallelism_setting > remaining_trials:\n parallelism_setting = remaining_trials\n for _ in range(parallelism_setting):\n params, idx = ax_client.get_next_trial()\n in_flight_trials.append((params, idx))\n remaining_trials -= 1\n for _ in range(parallelism_setting):\n params, idx = in_flight_trials.pop()\n ax_client.complete_trial(idx, branin(params[\"x\"], params[\"y\"]))\n # If all went well and no errors were raised, remaining_trials should be 0.\n return remaining_trials\n\n\nclass TestAxClient(TestCase):\n \"\"\"Tests service-like API functionality.\"\"\"\n\n def setUp(self):\n # To avoid tests timing out due to GP fit / gen times.\n patch.dict(\n f\"{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP\",\n {\"GPEI\": MODEL_KEY_TO_MODEL_SETUP[\"Sobol\"]},\n ).start()\n\n def test_interruption(self) -> None:\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test\",\n parameters=[ # pyre-fixme[6]: expected union that should include\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n objective_name=\"branin\",\n minimize=True,\n )\n for i in range(6):\n parameterization, trial_index = ax_client.get_next_trial()\n self.assertFalse( # There should be non-complete trials.\n all(t.status.is_terminal for t in ax_client.experiment.trials.values())\n )\n x, y = parameterization.get(\"x\"), parameterization.get(\"y\")\n ax_client.complete_trial(\n trial_index,\n raw_data=checked_cast(\n float, branin(checked_cast(float, x), checked_cast(float, y))\n ),\n )\n old_client = ax_client\n serialized 
= ax_client.to_json_snapshot()\n ax_client = AxClient.from_json_snapshot(serialized)\n self.assertEqual(len(ax_client.experiment.trials.keys()), i + 1)\n self.assertIsNot(ax_client, old_client)\n self.assertTrue( # There should be no non-complete trials.\n all(t.status.is_terminal for t in ax_client.experiment.trials.values())\n )\n\n @patch(\n \"ax.modelbridge.base.observations_from_data\",\n autospec=True,\n return_value=([get_observation1()]),\n )\n @patch(\n \"ax.modelbridge.random.RandomModelBridge.get_training_data\",\n autospec=True,\n return_value=([get_observation1()]),\n )\n @patch(\n \"ax.modelbridge.random.RandomModelBridge._predict\",\n autospec=True,\n return_value=[get_observation1trans().data],\n )\n @patch(\n \"ax.modelbridge.random.RandomModelBridge.feature_importances\",\n autospec=True,\n return_value={\"x\": 0.9, \"y\": 1.1},\n )\n def test_default_generation_strategy_continuous(self, _a, _b, _c, _d) -> None:\n \"\"\"Test that Sobol+GPEI is used if no GenerationStrategy is provided.\"\"\"\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[ # pyre-fixme[6]: expected union that should include\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n objective_name=\"a\",\n minimize=True,\n )\n self.assertEqual(\n [s.model for s in not_none(ax_client.generation_strategy)._steps],\n [Models.SOBOL, Models.GPEI],\n )\n with self.assertRaisesRegex(ValueError, \".* no trials\"):\n ax_client.get_optimization_trace(objective_optimum=branin.fmin)\n for i in range(6):\n parameterization, trial_index = ax_client.get_next_trial()\n x, y = parameterization.get(\"x\"), parameterization.get(\"y\")\n ax_client.complete_trial(\n trial_index,\n raw_data={\n \"a\": (\n checked_cast(\n float,\n branin(checked_cast(float, x), checked_cast(float, y)),\n ),\n 0.0,\n )\n },\n sample_size=i,\n )\n self.assertEqual(ax_client.generation_strategy.model._model_key, \"GPEI\")\n ax_client.get_optimization_trace(objective_optimum=branin.fmin)\n ax_client.get_contour_plot()\n ax_client.get_feature_importances()\n trials_df = ax_client.get_trials_data_frame()\n self.assertIn(\"x\", trials_df)\n self.assertIn(\"y\", trials_df)\n self.assertIn(\"a\", trials_df)\n self.assertEqual(len(trials_df), 6)\n\n def test_default_generation_strategy_discrete(self) -> None:\n \"\"\"Test that Sobol is used if no GenerationStrategy is provided and\n the search space is discrete.\n \"\"\"\n # Test that Sobol is chosen when all parameters are choice.\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[ # pyre-fixme[6]: expected union that should include\n {\"name\": \"x\", \"type\": \"choice\", \"values\": [1, 2, 3]},\n {\"name\": \"y\", \"type\": \"choice\", \"values\": [1, 2, 3]},\n ]\n )\n self.assertEqual(\n [s.model for s in not_none(ax_client.generation_strategy)._steps],\n [Models.SOBOL],\n )\n self.assertEqual(ax_client.get_max_parallelism(), [(-1, -1)])\n self.assertTrue(ax_client.get_trials_data_frame().empty)\n\n def test_create_experiment(self) -> None:\n \"\"\"Test basic experiment creation.\"\"\"\n ax_client = AxClient(\n GenerationStrategy(\n steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]\n )\n )\n with self.assertRaisesRegex(ValueError, \"Experiment not set on Ax client\"):\n ax_client.experiment\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\n \"name\": \"x\",\n \"type\": \"range\",\n \"bounds\": [0.001, 0.1],\n \"value_type\": \"float\",\n 
\"log_scale\": True,\n },\n {\n \"name\": \"y\",\n \"type\": \"choice\",\n \"values\": [1, 2, 3],\n \"value_type\": \"int\",\n \"is_ordered\": True,\n },\n {\"name\": \"x3\", \"type\": \"fixed\", \"value\": 2, \"value_type\": \"int\"},\n {\n \"name\": \"x4\",\n \"type\": \"range\",\n \"bounds\": [1.0, 3.0],\n \"value_type\": \"int\",\n },\n {\n \"name\": \"x5\",\n \"type\": \"choice\",\n \"values\": [\"one\", \"two\", \"three\"],\n \"value_type\": \"str\",\n },\n {\n \"name\": \"x6\",\n \"type\": \"range\",\n \"bounds\": [1.0, 3.0],\n \"value_type\": \"int\",\n },\n ],\n objective_name=\"test_objective\",\n minimize=True,\n outcome_constraints=[\"some_metric >= 3\", \"some_metric <= 4.0\"],\n parameter_constraints=[\"x4 <= x6\"],\n )\n assert ax_client._experiment is not None\n self.assertEqual(ax_client._experiment, ax_client.experiment)\n self.assertEqual(\n ax_client._experiment.search_space.parameters[\"x\"],\n RangeParameter(\n name=\"x\",\n parameter_type=ParameterType.FLOAT,\n lower=0.001,\n upper=0.1,\n log_scale=True,\n ),\n )\n self.assertEqual(\n ax_client._experiment.search_space.parameters[\"y\"],\n ChoiceParameter(\n name=\"y\",\n parameter_type=ParameterType.INT,\n values=[1, 2, 3],\n is_ordered=True,\n ),\n )\n self.assertEqual(\n ax_client._experiment.search_space.parameters[\"x3\"],\n FixedParameter(name=\"x3\", parameter_type=ParameterType.INT, value=2),\n )\n self.assertEqual(\n ax_client._experiment.search_space.parameters[\"x4\"],\n RangeParameter(\n name=\"x4\", parameter_type=ParameterType.INT, lower=1.0, upper=3.0\n ),\n )\n self.assertEqual(\n ax_client._experiment.search_space.parameters[\"x5\"],\n ChoiceParameter(\n name=\"x5\",\n parameter_type=ParameterType.STRING,\n values=[\"one\", \"two\", \"three\"],\n ),\n )\n self.assertEqual(\n ax_client._experiment.optimization_config.outcome_constraints[0],\n OutcomeConstraint(\n metric=Metric(name=\"some_metric\"),\n op=ComparisonOp.GEQ,\n bound=3.0,\n relative=False,\n ),\n )\n self.assertEqual(\n ax_client._experiment.optimization_config.outcome_constraints[1],\n OutcomeConstraint(\n metric=Metric(name=\"some_metric\"),\n op=ComparisonOp.LEQ,\n bound=4.0,\n relative=False,\n ),\n )\n self.assertTrue(ax_client._experiment.optimization_config.objective.minimize)\n\n def test_constraint_same_as_objective(self):\n \"\"\"Check that we do not allow constraints on the objective metric.\"\"\"\n ax_client = AxClient(\n GenerationStrategy(\n steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]\n )\n )\n with self.assertRaises(ValueError):\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x3\", \"type\": \"fixed\", \"value\": 2, \"value_type\": \"int\"}\n ],\n objective_name=\"test_objective\",\n outcome_constraints=[\"test_objective >= 3\"],\n )\n\n def test_raw_data_format(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n for _ in range(6):\n parameterization, trial_index = ax_client.get_next_trial()\n x, y = parameterization.get(\"x\"), parameterization.get(\"y\")\n ax_client.complete_trial(trial_index, raw_data=(branin(x, y), 0.0))\n with self.assertRaisesRegex(ValueError, \"Raw data has an invalid type\"):\n ax_client.update_trial_data(trial_index, raw_data=\"invalid_data\")\n\n def test_raw_data_format_with_fidelities(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n 
parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 1.0]},\n            ],\n            minimize=True,\n        )\n        for _ in range(6):\n            parameterization, trial_index = ax_client.get_next_trial()\n            x, y = parameterization.get(\"x\"), parameterization.get(\"y\")\n            ax_client.complete_trial(\n                trial_index,\n                raw_data=[\n                    ({\"y\": y / 2.0}, {\"objective\": (branin(x, y / 2.0), 0.0)}),\n                    ({\"y\": y}, {\"objective\": (branin(x, y), 0.0)}),\n                ],\n            )\n\n    def test_keep_generating_without_data(self):\n        # Check that normally number of arms to generate is enforced.\n        ax_client = AxClient()\n        ax_client.create_experiment(\n            parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            minimize=True,\n        )\n        for _ in range(5):\n            parameterization, trial_index = ax_client.get_next_trial()\n        with self.assertRaisesRegex(DataRequiredError, \"All trials for current model\"):\n            ax_client.get_next_trial()\n        # Check that with enforce_sequential_optimization off, we can keep\n        # generating.\n        ax_client = AxClient(enforce_sequential_optimization=False)\n        ax_client.create_experiment(\n            parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            minimize=True,\n        )\n        self.assertFalse(\n            ax_client.generation_strategy._steps[0].enforce_num_trials, False\n        )\n        self.assertFalse(ax_client.generation_strategy._steps[1].max_parallelism, None)\n        for _ in range(10):\n            parameterization, trial_index = ax_client.get_next_trial()\n\n    def test_trial_completion(self):\n        ax_client = AxClient()\n        ax_client.create_experiment(\n            parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            minimize=True,\n        )\n        params, idx = ax_client.get_next_trial()\n        # Can't update before completing.\n        with self.assertRaisesRegex(ValueError, \".* not yet\"):\n            ax_client.update_trial_data(\n                trial_index=idx, raw_data={\"objective\": (0, 0.0)}\n            )\n        ax_client.complete_trial(trial_index=idx, raw_data={\"objective\": (0, 0.0)})\n        # Cannot complete a trial twice, should use `update_trial_data`.\n        with self.assertRaisesRegex(ValueError, \".* already been completed\"):\n            ax_client.complete_trial(trial_index=idx, raw_data={\"objective\": (0, 0.0)})\n        # Cannot update trial data with observation for a metric it already has.\n        with self.assertRaisesRegex(ValueError, \".* contained an observation\"):\n            ax_client.update_trial_data(\n                trial_index=idx, raw_data={\"objective\": (0, 0.0)}\n            )\n        # Same as above, except objective name should be getting inferred.\n        with self.assertRaisesRegex(ValueError, \".* contained an observation\"):\n            ax_client.update_trial_data(trial_index=idx, raw_data=1.0)\n        ax_client.update_trial_data(trial_index=idx, raw_data={\"m1\": (1, 0.0)})\n        metrics_in_data = ax_client.experiment.fetch_data().df[\"metric_name\"].values\n        self.assertIn(\"m1\", metrics_in_data)\n        self.assertIn(\"objective\", metrics_in_data)\n        self.assertEqual(ax_client.get_best_parameters()[0], params)\n        params2, idy = ax_client.get_next_trial()\n        ax_client.complete_trial(trial_index=idy, raw_data=(-1, 0.0))\n        self.assertEqual(ax_client.get_best_parameters()[0], params2)\n        params3, idx3 = ax_client.get_next_trial()\n        ax_client.complete_trial(\n            trial_index=idx3, raw_data=-2, metadata={\"dummy\": \"test\"}\n        )\n        
self.assertEqual(ax_client.get_best_parameters()[0], params3)\n self.assertEqual(\n ax_client.experiment.trials.get(2).run_metadata.get(\"dummy\"), \"test\"\n )\n best_trial_values = ax_client.get_best_parameters()[1]\n self.assertEqual(best_trial_values[0], {\"objective\": -2.0})\n self.assertTrue(math.isnan(best_trial_values[1][\"objective\"][\"objective\"]))\n\n def test_abandon_trial(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n\n # An abandoned trial adds no data.\n params, idx = ax_client.get_next_trial()\n ax_client.abandon_trial(trial_index=idx)\n data = ax_client.experiment.fetch_data()\n self.assertEqual(len(data.df.index), 0)\n\n # Can't update a completed trial.\n params2, idx2 = ax_client.get_next_trial()\n ax_client.complete_trial(trial_index=idx2, raw_data={\"objective\": (0, 0.0)})\n with self.assertRaisesRegex(ValueError, \".* in a terminal state.\"):\n ax_client.abandon_trial(trial_index=idx2)\n\n def test_ttl_trial(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n\n # A ttl trial that ends adds no data.\n params, idx = ax_client.get_next_trial(ttl_seconds=1)\n self.assertTrue(ax_client.experiment.trials.get(idx).status.is_running)\n time.sleep(1) # Wait for TTL to elapse.\n self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)\n # Also make sure we can no longer complete the trial as it is failed.\n with self.assertRaisesRegex(\n ValueError, \".* has been marked FAILED, so it no longer expects data.\"\n ):\n ax_client.complete_trial(trial_index=idx, raw_data={\"objective\": (0, 0.0)})\n\n params2, idy = ax_client.get_next_trial(ttl_seconds=1)\n ax_client.complete_trial(trial_index=idy, raw_data=(-1, 0.0))\n self.assertEqual(ax_client.get_best_parameters()[0], params2)\n\n def test_start_and_end_time_in_trial_completion(self):\n start_time = current_timestamp_in_millis()\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n params, idx = ax_client.get_next_trial()\n ax_client.complete_trial(\n trial_index=idx,\n raw_data=1.0,\n metadata={\n \"start_time\": start_time,\n \"end_time\": current_timestamp_in_millis(),\n },\n )\n dat = ax_client.experiment.fetch_data().df\n self.assertGreater(dat[\"end_time\"][0], dat[\"start_time\"][0])\n\n def test_fail_on_batch(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n batch_trial = ax_client.experiment.new_batch_trial(\n generator_run=GeneratorRun(\n arms=[\n Arm(parameters={\"x\": 0, \"y\": 1}),\n Arm(parameters={\"x\": 0, \"y\": 1}),\n ]\n )\n )\n with self.assertRaises(NotImplementedError):\n ax_client.complete_trial(batch_trial.index, 0)\n\n def test_log_failure(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 
15.0]},\n ],\n minimize=True,\n )\n _, idx = ax_client.get_next_trial()\n ax_client.log_trial_failure(idx, metadata={\"dummy\": \"test\"})\n self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)\n self.assertEqual(\n ax_client.experiment.trials.get(idx).run_metadata.get(\"dummy\"), \"test\"\n )\n with self.assertRaisesRegex(ValueError, \".* no longer expects\"):\n ax_client.complete_trial(idx, {})\n\n def test_attach_trial_and_get_trial_parameters(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n params, idx = ax_client.attach_trial(parameters={\"x\": 0.0, \"y\": 1.0})\n ax_client.complete_trial(trial_index=idx, raw_data=5)\n self.assertEqual(ax_client.get_best_parameters()[0], params)\n self.assertEqual(\n ax_client.get_trial_parameters(trial_index=idx), {\"x\": 0, \"y\": 1}\n )\n with self.assertRaises(ValueError):\n ax_client.get_trial_parameters(\n trial_index=10\n ) # No trial #10 in experiment.\n with self.assertRaisesRegex(ValueError, \".* is of type\"):\n ax_client.attach_trial({\"x\": 1, \"y\": 2})\n\n def test_attach_trial_ttl_seconds(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n params, idx = ax_client.attach_trial(\n parameters={\"x\": 0.0, \"y\": 1.0}, ttl_seconds=1\n )\n self.assertTrue(ax_client.experiment.trials.get(idx).status.is_running)\n time.sleep(1) # Wait for TTL to elapse.\n self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)\n # Also make sure we can no longer complete the trial as it is failed.\n with self.assertRaisesRegex(\n ValueError, \".* has been marked FAILED, so it no longer expects data.\"\n ):\n ax_client.complete_trial(trial_index=idx, raw_data=5)\n\n params2, idx2 = ax_client.attach_trial(\n parameters={\"x\": 0.0, \"y\": 1.0}, ttl_seconds=1\n )\n ax_client.complete_trial(trial_index=idx2, raw_data=5)\n self.assertEqual(ax_client.get_best_parameters()[0], params2)\n self.assertEqual(\n ax_client.get_trial_parameters(trial_index=idx2), {\"x\": 0, \"y\": 1}\n )\n\n def test_attach_trial_numpy(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n params, idx = ax_client.attach_trial(parameters={\"x\": 0.0, \"y\": 1.0})\n ax_client.complete_trial(trial_index=idx, raw_data=np.int32(5))\n self.assertEqual(ax_client.get_best_parameters()[0], params)\n\n def test_relative_oc_without_sq(self):\n \"\"\"Must specify status quo to have relative outcome constraint.\"\"\"\n ax_client = AxClient()\n with self.assertRaises(ValueError):\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n objective_name=\"test_objective\",\n minimize=True,\n outcome_constraints=[\"some_metric <= 4.0%\"],\n )\n\n def test_recommended_parallelism(self):\n ax_client = AxClient()\n with self.assertRaisesRegex(ValueError, \"No generation strategy\"):\n ax_client.get_max_parallelism()\n ax_client.create_experiment(\n parameters=[\n 
{\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n self.assertEqual(ax_client.get_max_parallelism(), [(5, 5), (-1, 3)])\n self.assertEqual(\n run_trials_using_recommended_parallelism(\n ax_client, ax_client.get_max_parallelism(), 20\n ),\n 0,\n )\n # With incorrect parallelism setting, the 'need more data' error should\n # still be raised.\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n )\n with self.assertRaisesRegex(DataRequiredError, \"All trials for current model \"):\n run_trials_using_recommended_parallelism(ax_client, [(6, 6), (-1, 3)], 20)\n\n @patch.dict(sys.modules, {\"ax.storage.sqa_store.structs\": None})\n @patch.dict(sys.modules, {\"sqalchemy\": None})\n @patch(\"ax.service.ax_client.DBSettings\", None)\n def test_no_sqa(self):\n # Make sure we couldn't import sqa_store.structs (this could happen when\n # SQLAlchemy is not installed).\n with self.assertRaises(ModuleNotFoundError):\n import ax_client.storage.sqa_store.structs # noqa F401\n # Make sure we can still import ax_client.\n __import__(\"ax.service.ax_client\")\n AxClient() # Make sure we still can instantiate client w/o db settings.\n # DBSettings should be defined in `ax_client` now, but incorrectly typed\n # `db_settings` argument should still make instantiation fail.\n with self.assertRaisesRegex(ValueError, \"`db_settings` argument should \"):\n AxClient(db_settings=\"badly_typed_db_settings\")\n\n def test_plotting_validation(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x3\", \"type\": \"fixed\", \"value\": 2, \"value_type\": \"int\"}\n ]\n )\n with self.assertRaisesRegex(ValueError, \".* there are no trials\"):\n ax_client.get_contour_plot()\n with self.assertRaisesRegex(ValueError, \".* there are no trials\"):\n ax_client.get_feature_importances()\n ax_client.get_next_trial()\n with self.assertRaisesRegex(ValueError, \".* less than 2 parameters\"):\n ax_client.get_contour_plot()\n ax_client = AxClient()\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ]\n )\n ax_client.get_next_trial()\n with self.assertRaisesRegex(ValueError, \"If `param_x` is provided\"):\n ax_client.get_contour_plot(param_x=\"y\")\n with self.assertRaisesRegex(ValueError, \"If `param_x` is provided\"):\n ax_client.get_contour_plot(param_y=\"y\")\n with self.assertRaisesRegex(ValueError, 'Parameter \"x3\"'):\n ax_client.get_contour_plot(param_x=\"x3\", param_y=\"x3\")\n with self.assertRaisesRegex(ValueError, 'Parameter \"x4\"'):\n ax_client.get_contour_plot(param_x=\"x\", param_y=\"x4\")\n with self.assertRaisesRegex(ValueError, 'Metric \"nonexistent\"'):\n ax_client.get_contour_plot(\n param_x=\"x\", param_y=\"y\", metric_name=\"nonexistent\"\n )\n with self.assertRaisesRegex(ValueError, \"Could not obtain contour\"):\n ax_client.get_contour_plot(\n param_x=\"x\", param_y=\"y\", metric_name=\"objective\"\n )\n with self.assertRaisesRegex(ValueError, \"Could not obtain feature\"):\n ax_client.get_feature_importances()\n\n def test_sqa_storage(self):\n init_test_engine_and_session_factory(force_init=True)\n config = SQAConfig()\n encoder = Encoder(config=config)\n decoder = 
Decoder(config=config)\n        db_settings = DBSettings(encoder=encoder, decoder=decoder)\n        ax_client = AxClient(db_settings=db_settings)\n        ax_client.create_experiment(\n            name=\"test_experiment\",\n            parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            minimize=True,\n        )\n        for _ in range(5):\n            parameters, trial_index = ax_client.get_next_trial()\n            ax_client.complete_trial(\n                trial_index=trial_index, raw_data=branin(*parameters.values())\n            )\n        gs = ax_client.generation_strategy\n        ax_client = AxClient(db_settings=db_settings)\n        ax_client.load_experiment_from_database(\"test_experiment\")\n        # Trial #4 was completed after the last time the generation strategy\n        # generated candidates, so pre-save generation strategy was not\n        # \"aware\" of completion of trial #4. Post-restoration generation\n        # strategy is aware of it, however, since it gets restored with most\n        # up-to-date experiment data. So we add trial #4 to the seen completed\n        # trials of the pre-storage GS before checking their equality.\n        gs._seen_trial_indices_by_status[TrialStatus.COMPLETED].add(4)\n        self.assertEqual(gs, ax_client.generation_strategy)\n        with self.assertRaises(ValueError):\n            # Overwriting existing experiment.\n            ax_client.create_experiment(\n                name=\"test_experiment\",\n                parameters=[\n                    {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                    {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n                ],\n                minimize=True,\n            )\n        with self.assertRaises(ValueError):\n            # Overwriting existing experiment with overwrite flag with present\n            # DB settings. This should fail as we no longer allow overwriting\n            # experiments stored in the DB.\n            ax_client.create_experiment(\n                name=\"test_experiment\",\n                parameters=[{\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]}],\n                overwrite_existing_experiment=True,\n            )\n        # Original experiment should still be in DB and not have been overwritten.\n        self.assertEqual(len(ax_client.experiment.trials), 5)\n\n    def test_overwrite(self):\n        init_test_engine_and_session_factory(force_init=True)\n        ax_client = AxClient()\n        ax_client.create_experiment(\n            name=\"test_experiment\",\n            parameters=[\n                {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            minimize=True,\n        )\n\n        # Log a trial\n        parameters, trial_index = ax_client.get_next_trial()\n        ax_client.complete_trial(\n            trial_index=trial_index, raw_data=branin(*parameters.values())\n        )\n\n        with self.assertRaises(ValueError):\n            # Overwriting existing experiment.\n            ax_client.create_experiment(\n                name=\"test_experiment\",\n                parameters=[\n                    {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                    {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n                ],\n                minimize=True,\n            )\n        # Overwriting existing experiment with overwrite flag.\n        ax_client.create_experiment(\n            name=\"test_experiment\",\n            parameters=[\n                {\"name\": \"x1\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n                {\"name\": \"x2\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n            ],\n            overwrite_existing_experiment=True,\n        )\n        # There should be no trials, as we just put in a fresh experiment.\n        self.assertEqual(len(ax_client.experiment.trials), 0)\n\n        # Log a trial\n        parameters, trial_index = ax_client.get_next_trial()\n        self.assertIn(\"x1\", parameters.keys())\n        self.assertIn(\"x2\", parameters.keys())\n        ax_client.complete_trial(\n            trial_index=trial_index, 
raw_data=branin(*parameters.values())\n )\n\n def test_fixed_random_seed_reproducibility(self):\n ax_client = AxClient(random_seed=239)\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ]\n )\n for _ in range(5):\n params, idx = ax_client.get_next_trial()\n ax_client.complete_trial(idx, branin(params.get(\"x\"), params.get(\"y\")))\n trial_parameters_1 = [\n t.arm.parameters for t in ax_client.experiment.trials.values()\n ]\n ax_client = AxClient(random_seed=239)\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ]\n )\n for _ in range(5):\n params, idx = ax_client.get_next_trial()\n ax_client.complete_trial(idx, branin(params.get(\"x\"), params.get(\"y\")))\n trial_parameters_2 = [\n t.arm.parameters for t in ax_client.experiment.trials.values()\n ]\n self.assertEqual(trial_parameters_1, trial_parameters_2)\n\n def test_init_position_saved(self):\n ax_client = AxClient(random_seed=239)\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n name=\"sobol_init_position_test\",\n )\n for _ in range(4):\n # For each generated trial, snapshot the client before generating it,\n # then recreate client, regenerate the trial and compare the trial\n # generated before and after snapshotting. If the state of Sobol is\n # recorded correctly, the newly generated trial will be the same as\n # the one generated before the snapshotting.\n serialized = ax_client.to_json_snapshot()\n params, idx = ax_client.get_next_trial()\n ax_client = AxClient.from_json_snapshot(serialized)\n with self.subTest(ax=ax_client, params=params, idx=idx):\n new_params, new_idx = ax_client.get_next_trial()\n self.assertEqual(params, new_params)\n self.assertEqual(idx, new_idx)\n self.assertEqual(\n ax_client.experiment.trials[\n idx\n ]._generator_run._model_state_after_gen[\"init_position\"],\n idx + 1,\n )\n ax_client.complete_trial(idx, branin(params.get(\"x\"), params.get(\"y\")))\n\n def test_unnamed_experiment_snapshot(self):\n ax_client = AxClient(random_seed=239)\n ax_client.create_experiment(\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ]\n )\n serialized = ax_client.to_json_snapshot()\n ax_client = AxClient.from_json_snapshot(serialized)\n self.assertIsNone(ax_client.experiment._name)\n\n @patch(\n \"ax.modelbridge.base.observations_from_data\",\n autospec=True,\n return_value=([get_observation1()]),\n )\n @patch(\n \"ax.modelbridge.random.RandomModelBridge.get_training_data\",\n autospec=True,\n return_value=([get_observation1()]),\n )\n @patch(\n \"ax.modelbridge.random.RandomModelBridge._predict\",\n autospec=True,\n return_value=[get_observation1trans().data],\n )\n def test_get_model_predictions(self, _predict, _tr_data, _obs_from_data):\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n objective_name=\"a\",\n )\n ax_client.get_next_trial()\n ax_client.experiment.trials[0].arm._name = \"1_1\"\n 
self.assertEqual(ax_client.get_model_predictions(), {0: {\"a\": (9.0, 1.0)}})\n\n def test_deprecated_save_load_method_errors(self):\n ax_client = AxClient()\n with self.assertRaises(NotImplementedError):\n ax_client.save()\n with self.assertRaises(NotImplementedError):\n ax_client.load()\n with self.assertRaises(NotImplementedError):\n ax_client.load_experiment(\"test_experiment\")\n with self.assertRaises(NotImplementedError):\n ax_client.get_recommended_max_parallelism()\n\n def test_find_last_trial_with_parameterization(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n objective_name=\"a\",\n )\n params, trial_idx = ax_client.get_next_trial()\n found_trial_idx = ax_client._find_last_trial_with_parameterization(\n parameterization=params\n )\n self.assertEqual(found_trial_idx, trial_idx)\n # Check that it's indeed the _last_ trial with params that is found.\n _, new_trial_idx = ax_client.attach_trial(parameters=params)\n found_trial_idx = ax_client._find_last_trial_with_parameterization(\n parameterization=params\n )\n self.assertEqual(found_trial_idx, new_trial_idx)\n with self.assertRaisesRegex(ValueError, \"No .* matches\"):\n found_trial_idx = ax_client._find_last_trial_with_parameterization(\n parameterization={k: v + 1.0 for k, v in params.items()}\n )\n\n def test_verify_parameterization(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n objective_name=\"a\",\n )\n params, trial_idx = ax_client.get_next_trial()\n self.assertTrue(\n ax_client.verify_trial_parameterization(\n trial_index=trial_idx, parameterization=params\n )\n )\n # Make sure it still works if ordering in the parameterization is diff.\n self.assertTrue(\n ax_client.verify_trial_parameterization(\n trial_index=trial_idx,\n parameterization={k: params[k] for k in reversed(list(params.keys()))},\n )\n )\n self.assertFalse(\n ax_client.verify_trial_parameterization(\n trial_index=trial_idx,\n parameterization={k: v + 1.0 for k, v in params.items()},\n )\n )\n\n def test_tracking_metric_addition(self):\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n objective_name=\"a\",\n )\n params, trial_idx = ax_client.get_next_trial()\n self.assertEqual(list(ax_client.experiment.metrics.keys()), [\"a\"])\n ax_client.complete_trial(trial_index=trial_idx, raw_data={\"a\": 1.0, \"b\": 2.0})\n self.assertEqual(list(ax_client.experiment.metrics.keys()), [\"b\", \"a\"])\n\n @patch(\n \"ax.core.experiment.Experiment.new_trial\",\n side_effect=RuntimeError(\"cholesky_cpu error - bad matrix\"),\n )\n def test_annotate_exception(self, _):\n ax_client = AxClient()\n ax_client.create_experiment(\n name=\"test_experiment\",\n parameters=[\n {\"name\": \"x\", \"type\": \"range\", \"bounds\": [-5.0, 10.0]},\n {\"name\": \"y\", \"type\": \"range\", \"bounds\": [0.0, 15.0]},\n ],\n minimize=True,\n objective_name=\"a\",\n )\n with self.assertRaisesRegex(\n expected_exception=RuntimeError,\n expected_regex=\"Cholesky 
errors typically occur\",\n ):\n ax_client.get_next_trial()\n" ]
[ [ "numpy.int32" ] ]
bolecodex/amazon-sagemaker-immersion-day
[ "2894145bb0abd4961cb0e0b7c6d1e89264b76716" ]
[ "lab3/mnist-2.py" ]
[ "import tensorflow as tf\nimport argparse\nimport os\nimport numpy as np\nimport json\n\n\ndef model(x_train, y_train, x_test, y_test):\n \"\"\"Generate a simple model\"\"\"\n # Sequential: 创建顺序模型, 此模型为最简单的线性、从头到尾的结构顺序,不分叉,是多个网络层的线性堆叠。 参数:数组中的内容为模型中的层次结构\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(), # Flatten层用来将输入“压平”,即把多维的输入一维化处理\n tf.keras.layers.Dense(1024, activation=tf.nn.relu), # 该层网络有1024个神经元, 激励函数为: relu\n tf.keras.layers.Dropout(0.4), # 利用Dropout函数防止过拟合, 表示将有多少神经元暂时从网络中丢弃\n tf.keras.layers.Dense(10, activation=tf.nn.softmax) # 该层网络有10个神经元, 激励函数为: softmax\n ])\n\n # 对模型进行compile操作, 使用的优化器为 adam, 损失函数为: sparse_categorical_crossentropy, 监控指标为 accuracy\n # adam 优化器: 是对SGD的扩展, 实现简单,计算高效,对内存需求少.自动调整学习率,很适合应用于大规模的数据及参数的场景.适用于梯度稀疏或梯度存在很大噪声的问题\n # sparse_categorical_crossentropy 损失函数: categorical_crossentropy和sparse_categorical_crossentropy都是计算多分类crossentropy的,只是对y的格式要求不同。\n # 1)如果是categorical_crossentropy,那y必须是one-hot处理过的\n # 2)如果是sparse_categorical_crossentropy,那y就是原始的整数形式.\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n # 训练数据 \n model.fit(x_train, y_train)\n\n # 验证数据\n model.evaluate(x_test, y_test)\n\n return model\n\n\ndef _load_training_data(base_dir):\n \"\"\"Load MNIST training data\"\"\"\n x_train = np.load(os.path.join(base_dir, 'train_data.npy'))\n y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))\n return x_train, y_train\n\n\ndef _load_testing_data(base_dir):\n \"\"\"Load MNIST testing data\"\"\"\n x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))\n y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))\n return x_test, y_test\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n\n # Data, model, and output directories\n # model_dir is always passed in from SageMaker. By default this is a S3 path under the default bucket.\n parser.add_argument('--model_dir', type=str)\n parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))\n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))\n parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))\n\n return parser.parse_known_args()\n\n\nif __name__ == \"__main__\":\n args, unknown = _parse_args()\n\n # 加载训练数据和测试数据\n train_data, train_labels = _load_training_data(args.train)\n eval_data, eval_labels = _load_testing_data(args.train)\n\n # 创建模型\n mnist_classifier = model(train_data, train_labels, eval_data, eval_labels)\n\n if args.current_host == args.hosts[0]:\n # save model to an S3 directory with version number '00000001' in Tensorflow SavedModel Format\n # To export the model as h5 format use model.save('my_model.h5')\n # 将模型进行导出, 默认格式为 h5 形式、\n mnist_classifier.save(os.path.join(args.sm_model_dir, '000000001'))\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dropout" ] ]
onboarding92/AutonomousDrive
[ "80045ffd15ba9ee5b7c39ac7ffa9325616588cff" ]
[ "utils/utils.py" ]
[ "from __future__ import division\nimport math\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\ndef load_classes(path):\n \"\"\"\n Loads class labels at 'path'\n \"\"\"\n fp = open(path, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n return names\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.0], recall, [1.0]))\n mpre = np.concatenate(([0.0], precision, [0.0]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n \"\"\"\n Returns the IoU of two bounding boxes\n \"\"\"\n if not x1y1x2y2:\n # Transform from center and width to exact coordinates\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\n else:\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\n\n # get the corrdinates of the intersection rectangle\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n # Intersection area\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(\n inter_rect_y2 - inter_rect_y1 + 1, min=0\n )\n # Union Area\n b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)\n\n iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)\n\n return iou\n\n\ndef bbox_iou_numpy(box1, box2):\n \"\"\"Computes IoU between bounding boxes.\n Parameters\n ----------\n box1 : ndarray\n (N, 4) shaped array with bboxes\n box2 : ndarray\n (M, 4) shaped array with bboxes\n Returns\n -------\n : ndarray\n (N, M) shaped array with IoUs\n \"\"\"\n area = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])\n\n iw = np.minimum(np.expand_dims(box1[:, 2], axis=1), box2[:, 2]) - np.maximum(\n np.expand_dims(box1[:, 0], 1), box2[:, 0]\n )\n ih = np.minimum(np.expand_dims(box1[:, 3], axis=1), box2[:, 3]) - np.maximum(\n np.expand_dims(box1[:, 1], 1), box2[:, 1]\n )\n\n iw = np.maximum(iw, 0)\n ih = np.maximum(ih, 0)\n\n ua = np.expand_dims((box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]), axis=1) + area - iw * ih\n\n ua = np.maximum(ua, np.finfo(float).eps)\n\n intersection = iw * ih\n\n return 
intersection / ua\n\n\ndef non_max_suppression(prediction, num_classes, conf_thres=0.5, nms_thres=0.4):\n \"\"\"\n Removes detections with lower object confidence score than 'conf_thres' and performs\n Non-Maximum Suppression to further filter detections.\n Returns detections with shape:\n (x1, y1, x2, y2, object_conf, class_score, class_pred)\n \"\"\"\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()\n image_pred = image_pred[conf_mask]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_conf, class_pred = torch.max(image_pred[:, 5 : 5 + num_classes], 1, keepdim=True)\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)\n # Iterate through all predicted classes\n unique_labels = detections[:, -1].cpu().unique()\n if prediction.is_cuda:\n unique_labels = unique_labels.cuda()\n for c in unique_labels:\n # Get the detections with the particular class\n detections_class = detections[detections[:, -1] == c]\n # Sort the detections by maximum objectness confidence\n _, conf_sort_index = torch.sort(detections_class[:, 4], descending=True)\n detections_class = detections_class[conf_sort_index]\n # Perform non-maximum suppression\n max_detections = []\n while detections_class.size(0):\n # Get detection with highest confidence and save as max detection\n max_detections.append(detections_class[0].unsqueeze(0))\n # Stop if we're at the last detection\n if len(detections_class) == 1:\n break\n # Get the IOUs for all boxes with lower confidence\n ious = bbox_iou(max_detections[-1], detections_class[1:])\n # Remove detections with IoU >= NMS threshold\n detections_class = detections_class[1:][ious < nms_thres]\n\n max_detections = torch.cat(max_detections).data\n # Add max detections to outputs\n output[image_i] = (\n max_detections if output[image_i] is None else torch.cat((output[image_i], max_detections))\n )\n\n return output\n\n\ndef build_targets(\n pred_boxes, pred_conf, pred_cls, target, anchors, num_anchors, num_classes, grid_size, ignore_thres, img_dim\n):\n nB = target.size(0)\n nA = num_anchors\n nC = num_classes\n nG = grid_size\n mask = torch.zeros(nB, nA, nG, nG)\n conf_mask = torch.ones(nB, nA, nG, nG)\n tx = torch.zeros(nB, nA, nG, nG)\n ty = torch.zeros(nB, nA, nG, nG)\n tw = torch.zeros(nB, nA, nG, nG)\n th = torch.zeros(nB, nA, nG, nG)\n tconf = torch.ByteTensor(nB, nA, nG, nG).fill_(0)\n tcls = torch.ByteTensor(nB, nA, nG, nG, nC).fill_(0)\n\n nGT = 0\n nCorrect = 0\n for b in range(nB):\n for t in range(target.shape[1]):\n if target[b, t].sum() == 0:\n continue\n nGT += 1\n # Convert to position relative to box\n gx = target[b, t, 1] * nG\n gy = target[b, t, 2] * nG\n gw = target[b, t, 3] * nG\n gh = target[b, t, 4] * nG\n # Get grid box indices\n gi = int(gx)\n gj = int(gy)\n # Get shape of 
gt box\n gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)\n # Get shape of anchor box\n anchor_shapes = torch.FloatTensor(np.concatenate((np.zeros((len(anchors), 2)), np.array(anchors)), 1))\n # Calculate iou between gt and anchor shapes\n anch_ious = bbox_iou(gt_box, anchor_shapes)\n # Where the overlap is larger than threshold set mask to zero (ignore)\n conf_mask[b, anch_ious > ignore_thres, gj, gi] = 0\n # Find the best matching anchor box\n best_n = np.argmax(anch_ious)\n # Get ground truth box\n gt_box = torch.FloatTensor(np.array([gx, gy, gw, gh])).unsqueeze(0)\n # Get the best prediction\n pred_box = pred_boxes[b, best_n, gj, gi].unsqueeze(0)\n # Masks\n mask[b, best_n, gj, gi] = 1\n conf_mask[b, best_n, gj, gi] = 1\n # Coordinates\n tx[b, best_n, gj, gi] = gx - gi\n ty[b, best_n, gj, gi] = gy - gj\n # Width and height\n tw[b, best_n, gj, gi] = math.log(gw / anchors[best_n][0] + 1e-16)\n th[b, best_n, gj, gi] = math.log(gh / anchors[best_n][1] + 1e-16)\n # One-hot encoding of label\n target_label = int(target[b, t, 0])\n tcls[b, best_n, gj, gi, target_label] = 1\n tconf[b, best_n, gj, gi] = 1\n\n # Calculate iou between ground truth and best matching prediction\n iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)\n pred_label = torch.argmax(pred_cls[b, best_n, gj, gi])\n score = pred_conf[b, best_n, gj, gi]\n if iou > 0.5 and pred_label == target_label and score > 0.5:\n nCorrect += 1\n\n return nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls\n\n\ndef to_categorical(y, num_classes):\n \"\"\" 1-hot encodes a tensor \"\"\"\n return torch.from_numpy(np.eye(num_classes, dtype=\"uint8\")[y])\n\n\n\"\"\"SEGMENTATION PART\"\"\"\n\n\ndef label_img_to_color(img):\n label_to_color = {\n 0: [128, 64,128],\n 1: [244, 35,232],\n 2: [ 70, 70, 70],\n 3: [102,102,156],\n 4: [190,153,153],\n 5: [153,153,153],\n 6: [250,170, 30],\n 7: [220,220, 0],\n 8: [107,142, 35],\n 9: [152,251,152],\n 10: [ 70,130,180],\n 11: [220, 20, 60],\n 12: [255, 0, 0],\n 13: [ 0, 0,142],\n 14: [ 0, 0, 70],\n 15: [ 0, 60,100],\n 16: [ 0, 80,100],\n 17: [ 0, 0,230],\n 18: [119, 11, 32],\n 19: [81, 0, 81]\n }\n\n img_height, img_width = img.shape\n\n img_color = np.zeros((img_height, img_width, 3))\n for row in range(img_height):\n for col in range(img_width):\n label = img[row, col]\n\n img_color[row, col] = np.array(label_to_color[label])\n\n return img_color\n" ]
[ [ "numpy.expand_dims", "torch.max", "torch.zeros", "torch.cat", "numpy.concatenate", "numpy.where", "torch.ones", "numpy.eye", "numpy.finfo", "numpy.argmax", "torch.sort", "numpy.zeros", "torch.nn.init.constant_", "torch.min", "torch.nn.init.normal_", "numpy.array", "numpy.sum", "torch.ByteTensor", "numpy.maximum", "torch.clamp", "torch.argmax" ] ]
disease-data-intelligence/3G_weight_loss_prediction
[ "476ced900f5a4595877257e38eccbe5e53cb64c2" ]
[ "ML_scripts/ensemble_scoring.py" ]
[ "#!/usr/bin/python3\n\n# Import packages\nseed = 42\nimport numpy as np\nnp.random.seed(seed) # Set numpy random seed\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve\nimport utils as u\n\n###############################\n# Ensemble predictions\n###############################\n# Average prediction scores\ndef average_scoring(scores_frame, true_labels_col, drop=False, t=0.5): \n # Extract scores and true labels\n true_labels = scores_frame[true_labels_col]\n if drop: \n scores = scores_frame.drop(labels=[true_labels_col]+drop, axis=1)\n else: \n scores = scores_frame.drop(labels=[true_labels_col], axis=1)\n \n # Get mean of each score\n means = np.nanmean(scores, axis=1)\n \n # Get metrics\n auc_score, sensitivity, specificity, mcc, con = u.performance_metrics(y_true=true_labels, y_pred=means, threshold=t)\n \n # Get ROC_AUC parameters\n fpr, tpr, thresholds = roc_curve(y_true=true_labels, y_score=means)\n \n return pd.Series(means, index=scores.index, name='mean'), [auc_score, sensitivity, specificity, mcc], (fpr, tpr)\n\n# Majority voting\ndef majority_voting(scores_frame, true_labels_col, drop=False, t=0.5): \n # Extract scores and true labels\n true_labels = scores_frame[true_labels_col]\n if drop: \n scores = scores_frame.drop(labels=[true_labels_col]+drop, axis=1)\n else: \n scores = scores_frame.drop(labels=[true_labels_col], axis=1)\n \n # Change scores to predicted class\n preds = scores.copy()\n preds[preds < t] = 0\n preds[preds >= t] = 1\n \n # Get majority class\n majorities = []\n for ID in preds.index: \n row = preds.loc[ID]\n mean = np.nanmean(row)\n if mean < t: \n majorities.append(0)\n else: \n majorities.append(1)\n #majority = pd.DataFrame(majorities, columns=['preds'], index=preds.index)\n \n # Get metrics\n auc_score, sensitivity, specificity, mcc, con = u.performance_metrics(y_true=true_labels, y_pred=majorities, threshold=t)\n \n # Get ROC_AUC parameters\n fpr, tpr, thresholds = roc_curve(y_true=true_labels, y_score=majorities)\n \n return pd.Series(majorities, index=scores.index, name='majority'), [auc_score, sensitivity, specificity, mcc], (fpr, tpr)\n\n# Confident average scores\ndef confident_average(scores_frame, true_labels_col, confidence=0.6, drop=False, verbose=False, t=0.5):\n # Extract scores and true labels\n true_labels = scores_frame[true_labels_col]\n if drop: \n scores = scores_frame.drop(labels=[true_labels_col]+drop, axis=1)\n else: \n scores = scores_frame.drop(labels=[true_labels_col], axis=1)\n\n # Find confident scores for each sample\n confident_scores, labels, ids = [], [], []\n no_con, no_cons = 0, {0: 0, 1: 0}\n for ID in scores.index: \n row = scores.loc[ID]\n con = [v for v in row if v >= confidence or v <= 1-confidence]\n if len(con) < 1: \n #print(row, '\\n')\n #con = [0.5]\n no_con += 1\n if true_labels.loc[ID] == 1: \n no_cons[1] += 1\n else: \n no_cons[0] += 1\n else: \n confident_scores.append(np.nanmean(con))\n labels.append(true_labels.loc[ID])\n ids.append(ID)\n #confident_scores.append(np.nanmean(con))\n\n # Look at samples with no confident scores\n if no_con > 0 and verbose: \n print('Fraction of samples with no confident scores: {0}/{1}'.format(no_con, len(scores.index)))\n print('Classbalance:', no_cons)\n \n # Get metrics\n auc_score, sensitivity, specificity, mcc, con = u.performance_metrics(y_true=labels, y_pred=confident_scores, threshold=t)\n \n # Get ROC_AUC parameters\n fpr, tpr, thresholds = roc_curve(y_true=labels, y_score=confident_scores)\n \n return 
pd.Series(confident_scores, index=ids, name='con_scor_'+str(round(confidence, 2))), [auc_score, sensitivity, specificity, mcc], (fpr, tpr)\n\n# Confident scores majority voting\ndef confident_majority_voting(scores_frame, true_labels_col, confidence=0.6, drop=False, verbose=False, t=0.5):\n    # Extract scores and true labels\n    true_labels = scores_frame[true_labels_col]\n    if drop: \n        scores = scores_frame.drop(labels=[true_labels_col]+drop, axis=1)\n    else: \n        scores = scores_frame.drop(labels=[true_labels_col], axis=1)\n\n    # Find confident scores for each sample\n    confident_votes, labels, ids = [], [], []\n    no_con, no_cons = 0, {0: 0, 1: 0}\n    for ID in scores.index: \n        row = scores.loc[ID]\n        con = [v for v in row if v >= confidence or v <= 1-confidence]\n        if len(con) < 1: \n            #print(row, '\\n')\n            #con = [0.5]\n            no_con += 1\n            if true_labels.loc[ID] == 1: \n                no_cons[1] += 1\n            else: \n                no_cons[0] += 1\n        else: \n            labels.append(true_labels.loc[ID])\n            ids.append(ID)\n\n            # Change scores to classes\n            classes = [1 if c >= t else 0 for c in con]\n            mean = np.nanmean(classes)\n            if mean < t: \n                confident_votes.append(0)\n            else: \n                confident_votes.append(1)\n\n    # Look at samples with no confident scores\n    if no_con > 0 and verbose: \n        print('Fraction of samples with no confident votes: {0}/{1}'.format(no_con, len(scores.index)))\n        print('Classbalance:', no_cons)\n    \n    # Get metrics\n    auc_score, sensitivity, specificity, mcc, con = u.performance_metrics(y_true=labels, y_pred=confident_votes, threshold=t)\n    \n    # Get ROC_AUC parameters\n    fpr, tpr, thresholds = roc_curve(y_true=labels, y_score=confident_votes)\n    \n    return pd.Series(confident_votes, index=ids, name='con_vot_'+str(round(confidence, 2))), [auc_score, sensitivity, specificity, mcc], (fpr, tpr)\n\n\n#########################################\n# Use all ensemble scoring methods\n#########################################\ndef score_ensemble(scores, true_col='y', drop=False, min_conf=0.6, max_conf=0.99, step=0.05, threshold=0.5, verbose=True): \n    # Mean of scores\n    means, [auc_score, sens, spec, mcc], (fpr, tpr) = average_scoring(scores_frame=scores, true_labels_col=true_col, drop=drop, t=threshold)\n    if verbose: \n        print('\\n# Mean scoring: ', auc_score, sens, spec, mcc)\n\n    # Set up dataframe to hold info\n    ens_perf = pd.DataFrame(data=[round(i, 2) for i in [0, auc_score, sens, spec, mcc]], \n                            index=['miss', 'auc', 'sens', 'spec', 'mcc'], columns=['mean'])\n    N = len(means)\n\n    # Majority voting\n    majority, [auc_score, sens, spec, mcc], (fpr, tpr) = majority_voting(scores_frame=scores, true_labels_col=true_col, drop=drop, t=threshold)\n    if verbose: \n        print('# Majority voting: ', auc_score, sens, spec, mcc, '\\n')\n    ens_perf['majority_voting'] = [round(i, 2) for i in [0, auc_score, sens, spec, mcc]]\n\n    # Combine first two series to dataframe\n    ens_scores = pd.concat([means, majority], join='outer', axis=1)\n\n    # Mean and majority of confident scores\n    if verbose: \n        print('\\n### Scoring with thresholds on confidence...')\n    confidences = np.arange(start=min_conf, stop=max_conf, step=step)\n    for con in confidences: \n        # Mean\n        confident_scores, [auc_score, sens, spec, mcc], (fpr, tpr) = confident_average(scores_frame=scores, true_labels_col=true_col, confidence=con, drop=drop, verbose=verbose, t=threshold)\n        if verbose:\n            print('# Mean of {} confident scores: '.format(con), auc_score, sens, spec, mcc)\n            print()\n        ens_perf['con_scor_'+str(round(con, 2))] = [round(i, 2) for i in [(N-len(confident_scores))/N, auc_score, sens, spec, mcc]]\n        \n        # Majority\n        
confident_votes, [auc_score, sens, spec, mcc], (fpr, tpr) = confident_majority_voting(scores_frame=scores, true_labels_col=true_col, confidence=con, drop=drop, verbose=verbose, t=threshold)\n        if verbose: \n            print('# Majority of {} confident votes: '.format(con), auc_score, sens, spec, mcc)\n            print()\n        ens_perf['con_majority_'+str(round(con, 2))] = [round(i, 2) for i in [(N-len(confident_votes))/N, auc_score, sens, spec, mcc]]\n\n        # Add to scores dataframe \n        ens_scores = pd.concat([ens_scores, confident_scores, confident_votes], join='outer', axis=1)\n\n    ens_scores = pd.concat([scores[true_col], ens_scores], join='outer', axis=1)\n\n    print('Shape of performances and scores:', ens_perf.shape, ens_scores.shape)\n\n    return ens_perf, ens_scores\n\n\n###################\n# Marianne plot\n###################\ndef show_predictions(scores, target='y', threshold=0.5, path_out=False, verbose=True, figsize=(7, 200)): \n    \"\"\"This function will plot which samples have been correctly classified. The input is a\n    single dict containing labels as keys and information on each model as values \n    in the order [auc_score, ids_test, y_true, y_pred].\n\n    all_ids: List, IDs of all samples as strings. \n    model_dict: Dict, containing model name as key and [auc_score, ids_test, y_true, y_pred] as value. \n    path_out: String, path where to save plot. \n    show: If True, show plot. \n    \"\"\"\n    all_ids = scores.index.tolist()\n    N, M = scores.shape\n    y_true = scores[target]\n\n    # Set up figure to hold IDs vs model type\n    f, id_fig = plt.subplots(figsize=figsize)\n    id_fig.margins(0.01, 0.01)\n    plt.ylabel('Samples (IDs)', fontsize=14)\n    plt.xlabel('Models', fontsize=14)\n    plt.title('Correctly classified samples', fontsize=20)\n    plt.yticks(range(len(all_ids)), all_ids, fontsize=12)\n    plt.grid(which='major', linestyle='dashed', linewidth=0.1)\n    plt.rc('axes', axisbelow=True)\n    cmap = plt.get_cmap('tab20', M)\n\n    # Coordinates and legend\n    counts = [0 for item in all_ids]\n    how_many_correct = dict(zip(all_ids, counts))\n    all_ids = dict(zip(all_ids, list(range(len(all_ids)))))\n    xticks = []\n    height = 0\n    legend = []\n\n    # Run through each model\n    missing_counts = {}\n    for col in scores.columns: \n        if col != target: \n            y_pred = scores[col].dropna(how='any')\n\n            # Find correct IDs\n            ids_test = []\n            pred_labels = [1 if v >= threshold else 0 for v in y_pred]\n            for ID, true, pred in zip(y_pred.index, y_true, pred_labels): \n                if true == round(pred): \n                    ids_test.append(ID)\n\n                    # Count item\n                    how_many_correct[ID] += 1\n\n            # Get correct classifications\n            xticks.append(col)\n            y = [all_ids[i] for i in ids_test]\n            x = [height]*len(y)\n\n            # Plot correct IDs\n            plot_ids = id_fig.scatter(x=x, y=y, s=15, label=col)\n            \n            # Plot x for missing IDs\n            missing = []\n            for ID in all_ids: \n                if ID not in missing_counts.keys(): \n                    missing_counts[ID] = 0\n                if ID not in y_pred.index: \n                    missing.append(ID)\n                    missing_counts[ID] += 1\n\n            if len(missing) > 0: \n                y = [all_ids[i] for i in missing]\n                x = [height]*len(y)\n                id_fig.scatter(x=x, y=y, marker='x', color='black')\n            \n            legend.append(height)\n            height += 1\n\n    plt.xticks(legend, xticks, fontsize=12, rotation=90)\n    plt.tight_layout()\n    plt.show()\n\n    if path_out: \n        plt.savefig(path_out, dpi=1000, transparent=True)\n    return how_many_correct, missing_counts\n    \n" ]
[ [ "pandas.concat", "matplotlib.pyplot.tight_layout", "pandas.Series", "numpy.random.seed", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "sklearn.metrics.roc_curve", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.savefig", "numpy.nanmean", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
cw-tan/deft
[ "abb4d23fa0bb53031c13daef9942bceba4afd655" ]
[ "test/tools_for_tests.py" ]
[ "import numpy as np\nfrom scipy.special import sph_harm\n\ndef get_box_geometry(vectors):\n \"\"\"expects box vectors in rows of 'vectors'\"\"\"\n\n # get box lengths, angles, and volume\n lengths = np.linalg.norm(vectors, axis=1)\n angles = np.empty(3)\n angles[0] = np.arccos(\n vectors[1,:].dot(vectors[2,:])/(lengths[1]*lengths[2]))\n angles[1] = np.arccos(\n vectors[0,:].dot(vectors[2,:])/(lengths[0]*lengths[2]))\n angles[2] = np.arccos(\n vectors[0,:].dot(vectors[1,:])/(lengths[0]*lengths[1]))\n volume = np.linalg.det(vectors)\n # get reciprocal lattice vectors (rows of 'recip_vectors') and lengths\n recip_vectors = 2.0*np.pi*np.linalg.inv(vectors).T\n recip_lengths = np.linalg.norm(recip_vectors, axis=1)\n return lengths, angles, volume, recip_vectors, recip_lengths\n\ndef get_function_on_grid(function, shape, vectors, r0=None):\n \"\"\" (1) function must take 3d numpy arrays as input\n (2) minimum image convention brings each component to within (0.5,0.5)\n (3) expects box vectors in rows of 'vectors'\n \"\"\"\n if r0 is None:\n r0 = np.zeros(3)\n else:\n r0 = np.linalg.inv(vectors.T).dot(r0) # convert to scaled coords\n # compute possible elements of dr\n x = np.arange(shape[0], dtype='float')/shape[0] - r0[0]\n y = np.arange(shape[1], dtype='float')/shape[1] - r0[1]\n z = np.arange(shape[2], dtype='float')/shape[2] - r0[2]\n # bring components within [0.5,0.5]\n x = x - np.rint(x)\n y = y - np.rint(y)\n z = z - np.rint(z)\n # create grids for vectorized calls\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n # convert to cartesian coordinates\n x = vectors[0,0]*xx + vectors[1,0]*yy + vectors[2,0]*zz\n y = vectors[0,1]*xx + vectors[1,1]*yy + vectors[2,1]*zz\n z = vectors[0,2]*xx + vectors[1,2]*yy + vectors[2,2]*zz\n # evaluate function\n return function(x,y,z)\n\ndef real_sph_harm(l, m, x, y, z):\n\n r = np.sqrt(x*x+y*y+z*z)\n # compute azimuthal angle, domain of [0,2*pi)\n theta = np.arctan2(y,x)\n theta += (theta<0)*2*np.pi\n # compute polar angle, setting phi=0 for r->0\n phi = np.zeros(np.array(r).shape) # np.array() enables scalar x,y,z\n np.divide(z, r, out=phi, where=(r>1e-12))\n phi = np.arccos(phi)\n # return real spherical harmonic\n if m<0:\n return np.sqrt(2)*(-1)**m*np.imag(sph_harm(np.abs(m),l,theta,phi))\n elif m==0:\n return np.real(sph_harm(m,l,theta,phi))\n else:\n return np.sqrt(2)*(-1)**m*np.real(sph_harm(m,l,theta,phi))\n" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.linalg.inv", "numpy.arange", "numpy.divide", "numpy.rint", "numpy.linalg.norm", "numpy.arccos", "scipy.special.sph_harm", "numpy.linalg.det", "numpy.arctan2", "numpy.array", "numpy.meshgrid", "numpy.zeros", "numpy.empty" ] ]
npirvin/Radiative-Transport-PV
[ "d5236bfae5789fd2be5bb0190e962a83c24b3ae7" ]
[ "single_cell_power.py" ]
[ "\"\"\" Created on Thu Jun 21 14:31:46 2018\r\nAuthor: Nicholas Irvin \r\n\r\nThis module determines the output power of a cell.\r\nIf using the diffusion model, then spectral.py calculates the total current,\r\n and this module calcualtes the power.\r\nIf not using the diffusion model, then spectral.py calculates photogeneration current \r\n and recombinaiton.py calculates recombinaiton current, and the difference is found here.\r\nFinally the function max-power-pt here maximizes the power. \"\"\"\r\n\r\n# Import libraries\r\nimport numpy as np\r\nfrom scipy.optimize import minimize_scalar, brentq\r\nimport matplotlib.pyplot as plt # useful for plotting IV curves\r\nimport spectral, recombination, sunlight, carrier_models\r\n\r\n\r\n# Set constants\r\nc = 2.99792458e8 \r\n# m/s The speed of light\r\nh = 6.6260755e-34 \r\n# J*s Planck's constant\r\nk = 1.38064852e-23 \r\n# J/K Boltzmann's constant\r\nq = 1.60217653e-19 \r\n# C The charge of an electron\r\ng = 2*np.pi/(c**2)/(h**3) \r\n# s^5/(kg^3*m^8) The coefficient for particle flux\r\nf = 1/46248.5 \r\n# (sr) Sunlight dilution factor, which is surface area of the Sun divided by\r\n# surface area of sphere with radius equal to Sun's distance from Earth\r\n\r\n\r\ndef find_Jgen(E1, E2, volt, photocollection, stack):\r\n \"\"\" Purpose: Calculate short-circuit current Jsc (A m^-2) given energy bounds for radiation.\r\n Concentration is the sunlight concentration in units of suns.\r\n Used by find_current, find_voltage, and max_power_pt. \"\"\"\r\n\r\n rec=stack.rec\r\n # Current generated by incident sunlight/laser_light.\r\n J_Sun = spectral.Photocollection(E1, stack, volt=volt, tau=rec.bulk_lifetime, diffusion_length=rec.diffusion_length, diffusivity=rec.diffusivity, absorptance=photocollection.absorptance, rear_emittance=photocollection.rear_emittance, rec=rec).J\r\n if stack.spectrum == 'Blackbody':\r\n J_Sun = f*J_Sun \r\n return(J_Sun)\r\n \r\n\r\n\r\n\r\ndef find_current(volt, E1, E2, photocollection, stack, Jsc=0, extra_plots='No'):\r\n \"\"\" Purpose: Find net current density (A m^-2). \r\n \r\n Used by max_power_pt. \"\"\" \r\n \r\n stack.extra_plots = 'Yes' if extra_plots == 'Yes' else 'No' # Turned on to tell inner functions to only make an plot. Useful to only plot at a certain voltage.\r\n rec = recombination.Recombination(volt, E1, E2, photocollection, stack)\r\n J_recombination = rec.J_recombination\r\n if Jsc == 0 or stack.voltage_dependent_Jgen == 'On' or stack.diffusion_limited == 'Yes': \r\n Jgen = find_Jgen(E1, E2, volt, stack.photocollection, stack)\r\n else: \r\n Jgen = Jsc\r\n if stack.diffusion_limited == 'No':\r\n J = Jgen - abs(J_recombination)\r\n if stack.diffusion_limited == 'Yes':\r\n J = Jgen\r\n return J\r\n\r\n\r\n\r\n# for special multijunction connections (aks structures). Needs reverification.\r\ndef find_voltage(J, E1, E2, photocollection, stack, Jsc=0): # Redo\r\n \"\"\" Purpose: Calculate voltage as a function of current.\r\n This function can be used for graphing the I-V curve. \r\n Used by max_power_pt. 
\"\"\"\r\n \r\n if Jsc == 0:\r\n Jsc = find_Jgen(E1, E2, 0, photocollection, stack)\r\n\r\n photocollection = spectral.Photocollection(E1, stack, volt=0) # necessary?\r\n\r\n def current_minus_current(volt): \r\n rec = recombination.Recombination(volt, E1, E2, photocollection, stack)\r\n stack.recombination_data = rec\r\n J_recombination = rec.J_recombination\r\n if Jsc == 0 or stack.voltage_dependent_Jgen == 'On':\r\n Jgen = find_Jgen(E1, E2, volt, photocollection, stack) # Jsc\r\n else: \r\n Jgen = Jsc\r\n if stack.diffusion_limited == 'Yes':\r\n return(Jgen - J) \r\n else:\r\n return(Jgen - abs(J_recombination) - J) \r\n # Finding a zero of 'Jsc - J_rec - J' finds the voltage that \r\n # satisfies the definition 'J = Jsc - J_dark'.\r\n VocCarnot = sunlight.Incident_Light(stack.spectrum, stack, extra=1, Eg=E1).VocCarnot # Maximum possible Voc\r\n volt = brentq(current_minus_current,0, VocCarnot)\r\n if stack.nonradiative_recombination_modeling == 'Yes':\r\n Rseries = stack.thickness*carrier_models.base_resistivity(stack.rec.carriers, stack)*1e-4 # (ohms*m^2)\r\n volt = volt - J*Rseries # Voltage drop due to Rseries\r\n return(volt) \r\n \r\n\r\n\r\n\r\nclass IV_Data:\r\n \"\"\" Get IV data and display IV curve.\r\n Used by max_power_pt.py. Outputs Jsc, Voc, FF\"\"\"\r\n def __init__(self, Vmp, power, E1, E2, photocollection, stack, Jsc):\r\n \r\n if stack.number_of_bandgaps == 1:\r\n def current(volt):\r\n return find_current(volt, E1, E2, photocollection, stack, Jsc)\r\n \r\n # Get Jsc, Voc, FF\r\n Jsc = find_current(0, E1, E2, photocollection, stack)\r\n print('Jsc = ', round(Jsc/10,4), ' mA/cm^2') # This is the Jsc without series resistance)\r\n VocCarnot = sunlight.Incident_Light(stack.spectrum, stack, extra=1).VocCarnot\r\n try:\r\n Voc = brentq(current, 0, VocCarnot) # some advanced-concept features like selective reflectors can make Voc above the bandgap\r\n except:\r\n try:\r\n Voc = brentq(current, 0, E1)\r\n except:\r\n Voc = 0\r\n print('Voc = ', round(Voc,4), 'V.')\r\n FF = power/(Voc*Jsc)\r\n print('FF = ', round(FF,5))\r\n self.Jsc = Jsc # A/m^2 \r\n self.Voc = Voc # V\r\n self.FF = FF # %\r\n self.Vmp = Vmp # V\r\n self.Jmp = current(Vmp) # A/m^2\r\n # stack.Jmp = self.Jmp\r\n \r\n # Radiative info\r\n flux_S = find_Jgen(E1, E2, 0, photocollection, stack)/q\r\n flux_B = spectral.Flux(E1, 10, stack.T_cell, 0, photocollection, stack).front_flux\r\n self.Vdb = k*stack.T_cell/q*np.log(flux_S/flux_B)\r\n self.Jph_mp = find_current(Vmp, E1, E2, photocollection, stack) # A/m^2\r\n\r\n # Extra saved data at the test point\r\n V_test = stack.V_test \r\n if V_test == 'Vmp':\r\n V_test = Vmp\r\n if V_test == 'Voc':\r\n V_test = Voc\r\n stack.V_test = V_test \r\n self.Jph_test = find_current(V_test, E1, E2, photocollection, stack) # A/m^2\r\n rec = stack.rec # should be at V_test\r\n self.rec = rec\r\n if stack.composition != []:\r\n PR = rec.P_PR\r\n else:\r\n PR = 0\r\n self.PR = PR\r\n\r\n # special use - to compare Jrad_abs and Jrad_EQE models for my paper: \r\n abs_flux = spectral.Flux(E1, E2, stack.T_cell, V_test, photocollection, stack, model='absorptance')\r\n self.J_rad_abs = q*abs_flux.flux # A/m^2\r\n \r\n \r\n # # Plot IV curve? 
then uncomment this block\r\n # if not plt.fignum_exists(4): # Already made a plot\r\n # Vmax = Voc\r\n # V_list = [i*Vmax/300 for i in range(301)]\r\n # I_list = np.array([])\r\n # V_r_list = []\r\n # for volt in V_list:\r\n # I = find_current(volt, E1, E2, photocollection, stack, Jsc)/stack.concentration\r\n # I_list = np.append(I_list, I)\r\n # R_cons = 0 # (ohms-cm^2) add constant series resistance here\r\n # R = (R_cons+stack.thickness*rec.base_resistivity)*1e-4 # (ohms*m^2)\r\n # V_r_list += [volt - I*R] # Voltage drop due to Rseries\r\n # plt.rc('font', family='sans-serif')\r\n # fig, ax = plt.subplots()\r\n # plt.title(stack.bandgap)\r\n # plt.xlim(0, 1.05*stack.bandgap)\r\n # plt.ylim(-.001, 1.1*Jsc/10)\r\n # ax.minorticks_on()\r\n # ax.set_xlabel('Volt (V)')\r\n # ax.set_ylabel('Current (mA/cm$^2$)')\r\n # ax.plot(V_r_list, I_list/10) #/10 for mA.cm^2 \r\n # find_current(Vmp, E1, E2, photocollection, stack, Jsc, extra_plots='Yes')\r\n\r\n\r\n else:\r\n self.Jsc = 0\r\n self.Voc = 0\r\n self.FF = 0\r\n self.Jmp = 0\r\n self.Vmp = 0\r\n self.J_Auger = 0\r\n self.PR = 0\r\n self.Vdb = 0\r\n self.J_rad_front = 0 \r\n self.J_rad_back = 0 \r\n self.Jph = 0\r\n self.ERE = 0\r\n\r\n \r\n \r\n\r\n\r\n# optimize power!\r\ndef max_power_pt(E1, E2, stack):\r\n \"\"\" Purpose: Calculate maximum power (W m^-2) for a cell absorbing between photon energies E1 \r\n and E2.\r\n Output: maximum power and the corresponding IV_Data object (which stores Vmp, Jsc, Voc, and FF)\r\n Used by independent_cell_power in the stack module. \"\"\"\r\n\r\n stack.set_bandgap(E1)\r\n photocollection = spectral.Photocollection(E1, stack, volt=0)\r\n Jsc = find_current(0, E1, E2, photocollection, stack)\r\n \r\n ## Power function to optimize\r\n def power_fun(volt): \r\n current = find_current(volt, E1, E2, photocollection, stack, Jsc)\r\n if stack.nonradiative_recombination_modeling == 'Yes':\r\n Rseries = stack.thickness*carrier_models.base_resistivity(stack.rec.carriers, stack)*1e-4 # (ohms*m^2)\r\n volt = volt - current*Rseries # Voltage drop due to Rseries\r\n return -volt*current\r\n VocCarnot = sunlight.Incident_Light(stack.spectrum, stack, extra=1).VocCarnot # Maximum possible Voc\r\n bound = max(VocCarnot, E1)\r\n res = minimize_scalar(power_fun, bounds=[0, bound], method='Bounded')\r\n power = -res.fun\r\n Vmp = res.x\r\n IV_data = IV_Data(Vmp, power, E1, E2, photocollection, stack, Jsc)\r\n return power, IV_data" ]
[ [ "numpy.log", "scipy.optimize.brentq", "scipy.optimize.minimize_scalar" ] ]
astaff/audio
[ "27a0f7653bc2918e314b4225782d2b29ef31ae4a" ]
[ "torchaudio/_backend.py" ]
[ "from functools import wraps\nfrom typing import Any, List, Union\n\nimport platform\nimport torch\nfrom torch import Tensor\n\nfrom . import _soundfile_backend, _sox_backend\n\n\n_audio_backend = \"soundfile\" if platform.system() == \"Windows\" else \"sox\"\n_audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n\n\ndef set_audio_backend(backend: str) -> None:\n \"\"\"\n Specifies the package used to load.\n Args:\n backend (str): Name of the backend. One of {}.\n \"\"\".format(_audio_backends.keys())\n global _audio_backend\n if backend not in _audio_backends:\n raise ValueError(\n \"Invalid backend '{}'. Options are {}.\".format(backend, _audio_backends.keys())\n )\n _audio_backend = backend\n\n\ndef get_audio_backend() -> str:\n \"\"\"\n Gets the name of the package used to load.\n \"\"\"\n return _audio_backend\n\n\ndef _get_audio_backend_module() -> Any:\n \"\"\"\n Gets the module backend to load.\n \"\"\"\n backend = get_audio_backend()\n return _audio_backends[backend]\n\n\ndef _audio_backend_guard(backends: Union[str, List[str]]) -> Any:\n\n if isinstance(backends, str):\n backends = [backends]\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if get_audio_backend() not in backends:\n raise RuntimeError(\"Function {} requires backend to be one of {}.\".format(func.__name__, backends))\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n" ]
[ [ "torch.is_tensor" ] ]
OmerRe/Zero_DCE_TF
[ "457c28751ccedd343a5bcdc750306d6f7501a3bc" ]
[ "test.py" ]
[ "import keras\nimport tensorflow as tf\nimport keras.backend as K\nimport os\nimport sys\nimport argparse\nimport time\nimport src.model\nimport numpy as np\nimport glob\n\nfrom PIL import Image\nfrom src.loss import *\nfrom src.model import DCE_x\nfrom keras import Model, Input\nfrom keras.layers import Concatenate, Conv2D\n\ntf.compat.v1.enable_eager_execution()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lowlight_test_images_path', type=str, default=\"test/LIME/\")\nconfig = parser.parse_args()\n\ndef test(lowlight_test_images_path):\n input_img = Input(shape=(512, 512, 3))\n conv1 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(input_img)\n conv2 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(conv1)\n conv3 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(conv2)\n conv4 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(conv3)\n\n int_con1 = Concatenate(axis=-1)([conv4, conv3])\n conv5 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(int_con1)\n int_con2 = Concatenate(axis=-1)([conv5, conv2])\n conv6 = Conv2D(32, (3, 3), strides=(1,1), activation='relu', padding='same')(int_con2)\n int_con3 = Concatenate(axis=-1)([conv6, conv1])\n x_r = Conv2D(24, (3,3), strides=(1,1), activation='tanh', padding='same')(int_con3)\n\n model = Model(inputs=input_img, outputs=x_r)\n model.load_weights(\"weights/ep_12_it_376.h5\")\n\n ### load image ###\n for test_file in glob.glob(lowlight_test_images_path + \"*.jpg\"):\n data_lowlight_path = test_file\n original_img = Image.open(data_lowlight_path)\n original_size = (np.array(original_img).shape[1], np.array(original_img).shape[0])\n\n original_img = original_img.resize((512,512), Image.ANTIALIAS)\n original_img = (np.asarray(original_img)/255.0)\n\n img_lowlight = Image.open(data_lowlight_path)\n\n img_lowlight = img_lowlight.resize((512,512), Image.ANTIALIAS)\n\n img_lowlight = (np.asarray(img_lowlight)/255.0)\n img_lowlight = np.expand_dims(img_lowlight, 0)\n # img_lowlight = K.constant(img_lowlight)\n\n ### process image ###\n start = time.time()\n A = model.predict(img_lowlight)\n r1, r2, r3, r4, r5, r6, r7, r8 = A[:,:,:,:3], A[:,:,:,3:6], A[:,:,:,6:9], A[:,:,:,9:12], A[:,:,:,12:15], A[:,:,:,15:18], A[:,:,:,18:21], A[:,:,:,21:24]\n x = original_img + r1 * (K.pow(original_img,2)-original_img)\n x = x + r2 * (K.pow(x,2)-x)\n x = x + r3 * (K.pow(x,2)-x)\n enhanced_image_1 = x + r4*(K.pow(x,2)-x)\n x = enhanced_image_1 + r5*(K.pow(enhanced_image_1,2)-enhanced_image_1)\n x = x + r6*(K.pow(x,2)-x)\n x = x + r7*(K.pow(x,2)-x)\n enhance_image = x + r8*(K.pow(x,2)-x)\n enhance_image = tf.cast((enhance_image[0,:,:,:] * 255), dtype=np.uint8)\n enhance_image = Image.fromarray(enhance_image.numpy())\n enhance_image = enhance_image.resize(original_size, Image.ANTIALIAS)\n duration = time.time() - start\n print('duration time of {}: {} seconds'.format(test_file, duration))\n enhance_image.save(\"test/results_12/\" + '/'.join(test_file.split('/')[2:]))\n\n\ntest(config.lowlight_test_images_path)" ]
[ [ "numpy.expand_dims", "numpy.asarray", "tensorflow.cast", "tensorflow.compat.v1.enable_eager_execution", "numpy.array" ] ]
sboominathan/DeepRL
[ "a415f23b9fff2f7179b1bd3c42ee46df64a605d4" ]
[ "agent/DDPG_agent.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport torch.multiprocessing as mp\nfrom network import *\nfrom utils import *\nfrom component import *\nimport pickle\nimport os\nimport time\n\nclass DDPGAgent:\n def __init__(self, config):\n self.config = config\n self.task = config.task_fn()\n self.worker_network = config.network_fn()\n self.target_network = config.network_fn()\n self.target_network.load_state_dict(self.worker_network.state_dict())\n self.actor_opt = config.actor_optimizer_fn(self.worker_network.actor.parameters())\n self.critic_opt = config.critic_optimizer_fn(self.worker_network.critic.parameters())\n self.replay = config.replay_fn()\n self.random_process = config.random_process_fn()\n self.criterion = nn.MSELoss()\n self.total_steps = 0\n\n self.state_normalizer = Normalizer(self.task.state_dim)\n self.reward_normalizer = Normalizer(1)\n\n self.action_history = []\n self.policy_history = []\n\n def soft_update(self, target, src):\n for target_param, param in zip(target.parameters(), src.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - self.config.target_network_mix) +\n param.data * self.config.target_network_mix)\n\n def save(self, file_name):\n with open(file_name, 'wb') as f:\n torch.save(self.worker_network.state_dict(), f)\n\n def close(self):\n pass\n\n def episode(self, deterministic=False, video_recorder=None, record_actions=False):\n self.random_process.reset_states()\n state = self.task.reset()\n state = self.state_normalizer(state)\n\n config = self.config\n actor = self.worker_network.actor\n critic = self.worker_network.critic\n target_actor = self.target_network.actor\n target_critic = self.target_network.critic\n\n steps = 0\n total_reward = 0.0\n while True:\n actor.eval()\n action = actor.predict(np.stack([state])).flatten()\n if not deterministic:\n action += self.random_process.sample()\n next_state, reward, done, info = self.task.step(action)\n if video_recorder is not None:\n video_recorder.capture_frame()\n next_state = self.state_normalizer(next_state)\n total_reward += reward\n reward = self.reward_normalizer(reward)\n\n if not deterministic:\n self.replay.feed([state, action, reward, next_state, int(done)])\n self.total_steps += 1\n\n if record_actions:\n self.action_history.append(action)\n\n steps += 1\n state = next_state\n\n if done:\n break\n\n if not deterministic and self.replay.size() >= config.min_memory_size:\n self.worker_network.train()\n experiences = self.replay.sample()\n states, actions, rewards, next_states, terminals = experiences\n q_next = target_critic.predict(next_states, target_actor.predict(next_states))\n terminals = critic.variable(terminals).unsqueeze(1)\n rewards = critic.variable(rewards).unsqueeze(1)\n q_next = config.discount * q_next * (1 - terminals)\n q_next.add_(rewards)\n q_next = q_next.detach()\n q = critic.predict(states, actions)\n critic_loss = self.criterion(q, q_next)\n\n critic.zero_grad()\n self.critic_opt.zero_grad()\n critic_loss.backward()\n self.critic_opt.step()\n\n actions = actor.predict(states, False)\n var_actions = Variable(actions.data, requires_grad=True)\n q = critic.predict(states, var_actions)\n q.backward(critic.tensor(np.ones(q.size())))\n\n actor.zero_grad()\n self.actor_opt.zero_grad()\n 
actions.backward(-var_actions.grad.data)\n self.actor_opt.step()\n\n self.soft_update(self.target_network, self.worker_network)\n\n return total_reward, steps\n\n\n def save_policy_history(self, file_name):\n np.save(file_name, self.policy_history)\n\n def save_action_history(self, file_name):\n np.save(file_name, self.action_history)\n\n def clear_action_history(self):\n self.action_history = []\n \n def clear_policy_history(self):\n self.policy_history = []\n\n" ]
[ [ "numpy.stack", "numpy.save" ] ]
hzhwcmhf/pytorch-pretrained-BERT
[ "485adde74244f9b614263420d1f823660e0f96fe" ]
[ "tests/optimization_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport torch\n\nfrom pytorch_pretrained_bert import BertAdam\n\nclass OptimizationTest(unittest.TestCase):\n\n def assertListAlmostEqual(self, list1, list2, tol):\n self.assertEqual(len(list1), len(list2))\n for a, b in zip(list1, list2):\n self.assertAlmostEqual(a, b, delta=tol)\n\n def test_adam(self):\n w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)\n target = torch.tensor([0.4, 0.2, -0.5])\n criterion = torch.nn.MSELoss(reduction='elementwise_mean')\n # No warmup, constant schedule, no gradient clipping\n optimizer = BertAdam(params=[w], lr=2e-1,\n weight_decay_rate=0.0,\n max_grad_norm=-1)\n for _ in range(100):\n loss = criterion(w, target)\n loss.backward()\n optimizer.step()\n w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.\n w.grad.zero_()\n self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.MSELoss", "torch.tensor" ] ]
activatedgeek/uncertainty-da-bayesian-classification
[ "a270fb095f4790dea15327145897d09d0ba9c80b", "a270fb095f4790dea15327145897d09d0ba9c80b" ]
[ "src/bnn_priors/bnn_priors/models/dense_nets.py", "src/bnn_priors/bnn_priors/data/UCI/uci.py" ]
[ "from torch import nn, Tensor\n\nfrom .layers import Linear\nfrom .base import RegressionModel, RaoBRegressionModel, ClassificationModel\nfrom .. import prior\n\n__all__ = ('LinearNealNormal', 'LinearPrior', 'DenseNet', 'RaoBDenseNet',\n 'ClassificationDenseNet', 'LinearRegression', 'LogisticRegression',\n 'RaoBLinearRegression')\n\ndef LinearNealNormal(in_dim: int, out_dim: int, std_w: float, std_b: float) -> nn.Module:\n return Linear(prior.Normal((out_dim, in_dim), 0., std_w/in_dim**.5),\n prior.Normal((out_dim,), 0., std_b))\n\n\ndef LinearPrior(in_dim, out_dim, prior_w=prior.Normal, loc_w=0., std_w=1.,\n prior_b=prior.Normal, loc_b=0., std_b=1., scaling_fn=None,\n weight_prior_params={}, bias_prior_params={}):\n if scaling_fn is None:\n def scaling_fn(std, dim):\n return std/dim**0.5\n return Linear(prior_w((out_dim, in_dim), loc_w, scaling_fn(std_w, in_dim), **weight_prior_params),\n prior_b((out_dim,), 0., std_b), **bias_prior_params)\n\n\ndef DenseNet(in_features, out_features, width, depth=3, noise_std=1.,\n prior_w=prior.Normal, loc_w=0., std_w=2**.5,\n prior_b=prior.Normal, loc_b=0., std_b=1.,\n scaling_fn=None, weight_prior_params={}, bias_prior_params={}):\n layers = [LinearPrior(in_features, width, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params),\n nn.ReLU()]\n for _ in range(depth-2):\n layers.append(LinearPrior(width, width, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params))\n layers.append(nn.ReLU())\n layers.append(LinearPrior(width, out_features, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params))\n return RegressionModel(nn.Sequential(*layers), noise_std)\n\n\ndef ClassificationDenseNet(in_features, out_features, width, depth=3, softmax_temp=1.,\n prior_w=prior.Normal, loc_w=0., std_w=2**.5,\n prior_b=prior.Normal, loc_b=0., std_b=1.,\n scaling_fn=None, weight_prior_params={}, bias_prior_params={}):\n layers = [LinearPrior(in_features, width, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params),\n nn.ReLU()]\n for _ in range(depth-2):\n layers.append(LinearPrior(width, width, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params))\n layers.append(nn.ReLU())\n layers.append(LinearPrior(width, out_features, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params))\n return ClassificationModel(nn.Sequential(*layers), softmax_temp)\n\n\ndef RaoBDenseNet(x_train: Tensor, y_train: Tensor, width: int,\n noise_std: float=1.,\n prior_w=prior.Normal, loc_w=0., std_w=2**.5,\n prior_b=prior.Normal, loc_b=0., std_b=1.,\n scaling_fn=None) -> nn.Module:\n in_features = x_train.size(-1)\n return RaoBRegressionModel(\n x_train, y_train, noise_std,\n last_layer_std=(2/width)**.5,\n net=nn.Sequential(\n LinearPrior(in_features, width, prior_w=prior_w, 
loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn),\n nn.ReLU(),\n LinearPrior(width, width, prior_w=prior_w, loc_w=loc_w,\n std_w=std_w, prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn),\n nn.ReLU()))\n\n\ndef LinearRegression(in_features, out_features, noise_std=1.,\n prior_w=prior.Normal, loc_w=0., std_w=2**.5,\n prior_b=prior.Normal, loc_b=0., std_b=1.,\n scaling_fn=None, weight_prior_params={}, bias_prior_params={}):\n return RegressionModel(LinearPrior(\n in_features, out_features,\n prior_w=prior_w, loc_w=loc_w, std_w=std_w,\n prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params), noise_std=noise_std)\n\ndef LogisticRegression(in_features, out_features, softmax_temp=1.,\n prior_w=prior.Normal, loc_w=0., std_w=2**.5,\n prior_b=prior.Normal, loc_b=0., std_b=1.,\n scaling_fn=None, weight_prior_params={}, bias_prior_params={}):\n return ClassificationModel(LinearPrior(\n in_features, out_features,\n prior_w=prior_w, loc_w=loc_w, std_w=std_w,\n prior_b=prior_b, loc_b=loc_b, std_b=std_b,\n scaling_fn=scaling_fn, weight_prior_params=weight_prior_params,\n bias_prior_params=bias_prior_params), softmax_temp=softmax_temp)\n\ndef RaoBLinearRegression(x_train: Tensor, y_train: Tensor, noise_std: float=1.,\n std_w: float=2**.5):\n in_features = x_train.size(-1)\n return RaoBRegressionModel(\n x_train, y_train, noise_std,\n last_layer_std=std_w * in_features**-.5,\n net=nn.Identity())\n", "import os\nimport torch as t\nimport numpy as np\nfrom torch.utils.data import TensorDataset\n\nfrom bnn_priors.data import Dataset\n\n__all__ = ('UCI',)\n\n\nclass UCI:\n \"\"\"\n The usage is:\n ```\n uci = UCIDataset(\"protein\", 3)\n ```\n e.g. normalized training dataset:\n ```\n uci.norm.train\n ```\n \"\"\"\n def __init__(self, dataset, split, dtype='float32', device=\"cpu\"):\n _ROOT = os.path.abspath(os.path.dirname(__file__))\n dataset_dir = f'{_ROOT}/{dataset}/'\n data = np.loadtxt(f'{dataset_dir}/data.txt').astype(getattr(np, dtype))\n index_features = np.loadtxt(f'{dataset_dir}/index_features.txt')\n index_target = np.loadtxt(f'{dataset_dir}/index_target.txt')\n X_unnorm = t.from_numpy(data[:, index_features.astype(int)])\n y_unnorm = t.from_numpy(data[:, index_target.astype(int):index_target.astype(int)+1])\n\n # split into train and test\n index_train = np.loadtxt(f'{dataset_dir}/index_train_{split}.txt').astype(int)\n index_test = np.loadtxt(f'{dataset_dir}/index_test_{split}.txt').astype(int)\n\n # record unnormalized dataset\n self.unnorm = Dataset(X_unnorm, y_unnorm, index_train, index_test, device)\n\n # compute normalization constants based on training set\n self.X_std = t.std(self.unnorm.train_X, 0)\n self.X_std[self.X_std == 0] = 1. # ensure we don't divide by zero\n self.X_mean = t.mean(self.unnorm.train_X, 0)\n\n self.y_mean = t.mean(self.unnorm.train_y)\n self.y_std = t.std(self.unnorm.train_y)\n\n X_norm = (self.unnorm.X - self.X_mean)/self.X_std\n y_norm = (self.unnorm.y - self.y_mean)/self.y_std\n\n self.norm = Dataset(X_norm, y_norm, index_train, index_test, device)\n\n self.num_train_set = self.unnorm.X.shape[0]\n self.in_shape = self.unnorm.X.shape[1:]\n self.out_shape = self.unnorm.y.shape[1:]\n\n def denormalize_y(self, y):\n return self.y_std * y + self.y_mean\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Identity" ], [ "torch.mean", "torch.std", "numpy.loadtxt" ] ]
snad-space/ad_examples
[ "7c62a81f52e79874d6215b262f5a849d56eeae4f" ]
[ "ad_examples/ad/kde_outlier.py" ]
[ "import numpy.random as rnd\n\nfrom ..common.gen_samples import *\nfrom ..common.gen_samples import *\n\n\"\"\"\npythonw -m ad_examples.ad.kde_outlier\n\"\"\"\n\n\nif __name__ == \"__main__\":\n\n logger = logging.getLogger(__name__)\n\n args = get_command_args(debug=True, debug_args=[\"--debug\",\n \"--plot\",\n \"--log_file=temp/kde_outlier.log\"])\n # print \"log file: %s\" % args.log_file\n configure_logger(args)\n\n oneD = True\n\n if oneD:\n # 1D data\n rnd.seed(42)\n x = np.append(stats.uniform.rvs(-1, 0.5, 20), stats.uniform.rvs(0.3, 2.0, 15))\n kernel = stats.gaussian_kde(x)\n scores = kernel.evaluate(x)\n xx = np.arange(x.min(), x.max(), 0.1)\n # logger.debug(\"xx:\\n%s\" % str(xx))\n t_scores = kernel.evaluate(xx)\n # logger.debug(\"scores:\\n%s\" % str(t_scores))\n px = np.hstack((np.transpose([x]), np.zeros(shape=(x.shape[0], 1))))\n tx = np.hstack((np.transpose([xx]), np.transpose([t_scores])))\n top_anoms = np.argsort(scores)[np.arange(5)]\n\n sxs = []\n # xx_ = np.arange(x.min(), x.max(), 0.1)\n logger.debug(\"kernel.factor:\\n%s\" % str(kernel.factor))\n for i in range(len(x)):\n # x_ = x[i]\n # k_ = stats.gaussian_kde(x_, kernel.factor)\n # ts_ = kernel.evaluate(xx_)\n ts_ = (1./(kernel.factor * len(x))) * np.exp(-0.5 * ((xx - x[i]) / kernel.factor) ** 2)\n logger.debug(\"ts_:\\n%s\" % str(list(ts_)))\n tx_ = np.hstack((np.transpose([xx]), np.transpose([ts_])))\n sxs.append(tx_)\n scols = ['blue'] * len(sxs)\n lines = [tx]\n lines.extend(sxs)\n line_colors = ['red']\n line_colors.extend(scols)\n line_widths = [2]\n line_widths.extend([1] * len(sxs))\n logger.debug(line_colors)\n plot_samples_and_lines(px,\n lines=lines, line_colors=line_colors, line_legends=None,\n top_anoms=top_anoms,\n pdfpath=\"temp/kde_1d_outlier.pdf\",\n line_widths=line_widths, samplescol=\"green\", marker='x', s=35)\n else:\n # sample_type = \"4_\"\n # sample_type = \"donut_\"\n sample_type = \"face_\"\n\n rnd.seed(42)\n\n x, y = get_demo_samples(sample_type)\n\n n = x.shape[0]\n\n xx = yy = x_grid = Z = scores = None\n if args.plot:\n # plot_synthetic_samples(x, y, pdfpath=\"temp/kde_%ssamples.pdf\" % sample_type)\n\n # to plot probability contours\n xx, yy = np.meshgrid(np.linspace(np.min(x[:, 0]), np.max(x[:, 0]), 50),\n np.linspace(np.min(x[:, 1]), np.max(x[:, 1]), 50))\n x_grid = np.c_[xx.ravel(), yy.ravel()]\n\n kernel = stats.gaussian_kde(x.T)\n scores = kernel.evaluate(x.T)\n logger.debug(scores)\n top_anoms = np.argsort(scores)[np.arange(10)]\n\n if args.plot:\n # plot_samples_and_lines(x, lines=None, line_colors=None, line_legends=None,\n # top_anoms=top_anoms,\n # pdfpath=\"temp/kde_%soutlier.pdf\" % sample_type)\n\n test_scores = kernel.evaluate(x_grid.T)\n Z = -np.reshape(test_scores, xx.shape)\n pdfpath = \"temp/kde_%scontours.pdf\" % sample_type\n dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)\n pl = dp.get_next_plot()\n pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet'))\n dp.plot_points(x, pl, labels=y, lbl_color_map={0: \"grey\", 1: \"red\"}, s=25)\n pl.scatter(x[top_anoms, 0], x[top_anoms, 1], marker='o', s=35,\n edgecolors='green', facecolors='none')\n dp.close()\n\n" ]
[ [ "numpy.random.seed" ] ]
wjy199708/my_point_painting
[ "32dd845b08a94e222e913471e42e9d9e128ba213" ]
[ "train.py" ]
[ "import torch\nimport copy\nfrom model import SSD, MultiBoxLoss\nfrom dataset import KittiDataset\nfrom torch.utils.data import DataLoader\nimport time\n\n\ndef train_model(model, dataloaders, criterion, optimizer, num_epochs=1):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n since = time.time()\n val_acc_history = []\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n for phase in ['train', 'val']:\n if phase == 'train':\n print(f\"Starting train phase for epoch {epoch}\")\n model.train()\n else:\n print(f\"Starting validation phase for epoch {epoch}\")\n model.eval()\n\n batch_num = 0\n running_loss = 0.0\n running_corrects = 0\n iteration_num = 0\n\n for augmented_lidar_cam_coords, boxes, classes in dataloaders[phase]:\n iteration_num += 1\n bat_size = len(augmented_lidar_cam_coords)\n num_samples_so_far = iteration_num * bat_size\n if num_samples_so_far % 100 == 0:\n print(\"Samples processed for this epoch:\",\n num_samples_so_far, '/', len(dataloaders[phase])*bat_size)\n print(\"Average Loss so far this epoch is:\",\n running_loss/(num_samples_so_far-bat_size))\n if batch_num % 100 == 0:\n print(f'phase is {phase} and batch is {batch_num}.')\n batch_num += 1\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase == 'train'):\n predicted_locs, predicted_scores, _ = model(\n augmented_lidar_cam_coords)\n loss = criterion(\n predicted_locs, predicted_scores, boxes, classes)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * bat_size\n print('-'*10)\n print('------------saving model--------------')\n torch.save(model.state_dict(), './painting_{}.pth'.format(epoch))\n return model # , val_acc_history\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nssd = SSD(resnet_type=34, n_classes=2).to(device)\n\ntrainset = KittiDataset(\n root=\"/media/lab509/wjy/from_github/PointRCNN/data/KITTI/object\", mode=\"training\", valid=False)\nvalset = KittiDataset(\n root=\"/media/lab509/wjy/from_github/PointRCNN/data/KITTI/object\", mode=\"training\", valid=True)\n\ndatasets = {'train': trainset, 'val': valset}\ndataloaders_dict = {x: DataLoader(datasets[x], batch_size=4, shuffle=True,\n collate_fn=datasets[x].collate_fn, num_workers=0, drop_last=True) for x in ['train', 'val']}\n\noptimizer_ft = torch.optim.SGD(ssd.parameters(), lr=0.0001, momentum=0.9)\ncriterion = MultiBoxLoss(priors_cxcy=ssd.priors_cxcy).to(device)\n\nssd = train_model(ssd, dataloaders_dict, criterion,\n optimizer_ft, num_epochs=10)\n\"\"\" 模型保存 \"\"\"\ntorch.save(ssd.state_dict(), './pointpillars.pth')\n" ]
[ [ "torch.set_grad_enabled", "torch.utils.data.DataLoader", "torch.cuda.is_available" ] ]
DewMaple/head_box
[ "27ac90511344bfa75b340d1db960365e9eb148c7" ]
[ "utils/__init__.py" ]
[ "from distutils.version import LooseVersion\n\nimport tensorflow as tf\n\nTENSORFLOW_VERSION = LooseVersion(tf.__version__)\n\n\ndef tf_concat(axis, values, **kwargs):\n if TENSORFLOW_VERSION >= LooseVersion('1.0'):\n return tf.concat(values, axis, **kwargs)\n else:\n return tf.concat(axis, values, **kwargs)\n" ]
[ [ "tensorflow.concat" ] ]
todo-group/exact
[ "ee76421fab9b2b1eaf77d6b01830a18e66f7180a" ]
[ "gallery/ising-square-tc/plot.py" ]
[ "import math\nimport matplotlib.pyplot as plt\n\nfilename = \"result-p15.dat\"\n\nwith open(filename, 'r') as f:\n for line in f:\n data = line.split()\n if (data[0] == \"inf\"):\n free_energy_inf = float(data[6])\n energy_inf = float(data[7])\n\nL = []\nfree_energy = []\nenergy = []\nwith open(filename, 'r') as f:\n for line in f:\n data = line.split()\n if (data[0] != '#' and data[0] != \"inf\"):\n L.append(int(data[0]))\n free_energy.append(math.fabs(float(data[6])-free_energy_inf))\n energy.append(math.fabs(float(data[7])-energy_inf))\n \nplt.plot(L, free_energy, marker = 'o', label = 'finite-size error of free energy density')\nplt.plot(L, energy, marker = 'v', label = 'finite-size error of energy density')\nplt.xlabel('L')\nplt.xscale('log')\nplt.yscale('log')\nplt.grid()\nplt.legend()\nplt.savefig('plot.pdf')\nplt.show()\n\nprint(L)\nprint(free_energy)\nprint(energy)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xscale", "matplotlib.pyplot.yscale", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show" ] ]
dornik/sporeagent
[ "a95139c47534670f7a47f86adf62d3e488981409" ]
[ "dataset/dataset.py" ]
[ "import numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision\nimport os\nimport pickle\nimport trimesh\nimport sys\nimport skimage.io as skio\nfrom tqdm import tqdm\nimport open3d as o3d\nimport cv2 as cv\nfrom scipy.spatial.transform.rotation import Rotation\nimport glob\n\nimport config as cfg\nimport dataset.augmentation as Transforms\n\nsys.path.append(cfg.BOP_PATH)\nsys.path.append(os.path.join(cfg.BOP_PATH, \"bop_toolkit_lib\"))\nimport bop_toolkit_lib.inout as bop_inout\nimport bop_toolkit_lib.dataset_params as bop_dataset_params\n\n\nclass DatasetLinemod(Dataset):\n\n def __init__(self, split, dataset_path=cfg.LM_PATH):\n self.split_name = split\n subsample = 16 if split == \"eval\" else 0 # use every 16th test sample for evaluation during training\n split = \"test\" if split == \"eval\" else split\n self.split = split\n self.dataset_path = dataset_path\n self.samples, self.models, self.symmetries = self.get_samples(split, subsample)\n self.transforms = self.get_transforms(split)\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n item = self.samples[idx]\n\n # compose sample\n model = self.models[item['obj_id']]\n gt = np.eye(4, dtype=np.float32)\n gt[:3, :3] = item['gt']['cam_R_m2c']\n gt[:3, 3] = item['gt']['cam_t_m2c'].squeeze()\n\n sample = {\n 'idx': idx,\n 'points_src': item['pcd'],\n 'points_ref': model,\n 'scene': item['scene'],\n 'frame': item['frame'],\n 'cam': item['cam'],\n 'obj_id': item['gt']['obj_id'],\n 'gt_m2c': gt,\n 'plane_src': item['plane_pcd'],\n 'plane_ref': self.models[0]\n }\n if 'est' in item: # initial estimate only given for test split (using PoseCNN)\n sample['est_m2c'] = item['est']\n if self.symmetries is not None:\n sample['symmetries'] = self.symmetries[item['obj_id']]['symmetries'] # padded to max number of syms\n sample['num_symmetries'] = self.symmetries[item['obj_id']][\n 'num_symmetries'] # num of valid syms (rest Id)\n if self.split != 'test' and 'cam_R_w2c' in item['cam']: # use annotated supporting plane for training\n sample['plane_m2c'] = np.eye(4, dtype=np.float32)\n sample['plane_m2c'][:3, :3] = sample['cam']['cam_R_w2c']\n sample['plane_m2c'][:3, 3] = sample['cam']['cam_t_w2c'].squeeze()\n elif 'plane' in item: # pre-computed supporting plane (RANSAC) when not available and for testing\n sample['plane_m2c'] = item['plane']\n\n if self.transforms:\n sample = self.transforms(sample)\n return sample\n\n def get_transforms(self, split):\n # error/noise magnitudes per dataset\n if self.dataset_path == cfg.YCBV_PATH:\n rot_mag, trans_mag = 75.0, 0.75\n p_fg = [0.8, 1.0]\n else:\n rot_mag, trans_mag = 90.0, 1.0\n p_fg = [0.5, 1.0]\n\n # prepare augmentations\n if split == \"train\":\n transforms = [\n # resample segmentation (with [p_fg]% from object)\n Transforms.SegmentResampler(1024, p_fg=p_fg),\n # align source and target using GT -- easier to define error this way\n Transforms.GtTransformSE3(align_plane=self.dataset_path == cfg.LM_PATH),\n # normalize source and target (mean centered, max dist 1.0)\n Transforms.Normalize(),\n # apply an initial pose error\n Transforms.RandomTransformSE3(rot_mag=rot_mag, trans_mag=trans_mag, random_mag=True),\n ]\n if cfg.USE_CONTACT:\n transforms.append(Transforms.RandomTransformSE3_plane(rot_mag=5.0, trans_mag=0.02, random_mag=True))\n # note: re-computing them from resampled pcds increases variance\n if cfg.USE_NORMALS and not cfg.PRECOMPUTE_NORMALS:\n transforms.insert(1, Transforms.ComputeNormals())\n elif split == \"val\":\n transforms 
= [\n Transforms.SetDeterministic(),\n Transforms.SegmentResampler(1024, p_fg=p_fg),\n Transforms.GtTransformSE3(align_plane=self.dataset_path == cfg.LM_PATH),\n Transforms.Normalize(),\n Transforms.RandomTransformSE3(rot_mag=rot_mag, trans_mag=trans_mag, random_mag=True),\n ]\n if cfg.USE_CONTACT:\n transforms.append(Transforms.RandomTransformSE3_plane(rot_mag=5.0, trans_mag=0.02, random_mag=True))\n if cfg.USE_NORMALS and not cfg.PRECOMPUTE_NORMALS:\n transforms.insert(2, Transforms.ComputeNormals())\n else: # start from posecnn\n transforms = [\n Transforms.SetDeterministic(),\n # randomly resample inside segmentation mask (estimated by PoseCNN)\n Transforms.SegmentResampler(1024, p_fg=1.0, patch=False), # note: fg is predicted mask\n Transforms.EstTransformSE3(align_plane=self.dataset_path == cfg.LM_PATH),\n Transforms.Normalize()\n ]\n if cfg.USE_NORMALS and not cfg.PRECOMPUTE_NORMALS:\n transforms.insert(2, Transforms.ComputeNormals())\n return torchvision.transforms.Compose(transforms)\n\n def get_samples(self, split, subsample=0):\n # ============= GET MODELS ============\n model_type = \"eval\"\n model_params = bop_dataset_params.get_model_params('/'.join(self.dataset_path.split('/')[:-1]),\n self.dataset_path.split('/')[-1], model_type)\n mesh_ids = model_params['obj_ids']\n\n # plane as 0 object: xy plane, z up\n plane_mm = 1000/np.sqrt(2) # size in mm\n plane_samples = 4096\n plane = np.hstack([v.reshape(-1, 1)\n for v in np.meshgrid(*[np.linspace(-plane_mm/2, plane_mm/2, int(np.sqrt(plane_samples)))]*2)]\n + [np.zeros((plane_samples, 3)), np.ones((plane_samples, 1))]).astype(np.float32)\n models = {0: plane}\n for mesh_id in mesh_ids:\n mesh = trimesh.load(os.path.join(self.dataset_path, f\"models_{model_type}/obj_{mesh_id:06d}.ply\"))\n pcd, face_indices = trimesh.sample.sample_surface_even(mesh, 4096)\n if pcd.shape[0] < 4096: # pad by additional samples if less than 4096 were returned\n additional_samples = np.random.choice(np.arange(pcd.shape[0]), size=4096 - pcd.shape[0], replace=True)\n pcd = np.vstack([pcd, pcd[additional_samples]])\n face_indices = np.hstack([face_indices, face_indices[additional_samples]])\n models[mesh_id] = np.hstack([pcd, mesh.face_normals[face_indices]]).astype(np.float32)\n\n # ============= GET DATASET SAMPLES ============\n samples_path = f\"sporeagent/{split}_posecnn_plane.pkl\" if split == \"test\"\\\n else f\"sporeagent/{split}_plane.pkl\"\n with open(os.path.join(self.dataset_path, samples_path), 'rb') as file:\n samples = pickle.load(file)\n if subsample > 0: # used for evaluation during training\n samples = samples[::subsample]\n\n if cfg.PRECOMPUTE_NORMALS:\n print(f\"precomputing normals for {len(samples)} samples...\")\n for sample in tqdm(samples):\n src = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(sample['pcd'][:, :3]))\n src.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=30))\n src.orient_normals_to_align_with_direction([0, 0, -1])\n sample['pcd'] = np.hstack(\n [sample['pcd'][:, :3], np.asarray(src.normals).astype(np.float32),\n sample['pcd'][:, -1][:, None]]) # mask\n if 'plane_pcd' in sample:\n plane_src = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(sample['plane_pcd'][:, :3]))\n plane_src.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=50, max_nn=30))\n plane_src.orient_normals_to_align_with_direction([0, 0, -1])\n sample['plane_pcd'] = np.hstack(\n [sample['plane_pcd'][:, :3], np.asarray(plane_src.normals).astype(np.float32),\n sample['plane_pcd'][:, -1][:, None]]) # mask\n\n 
if cfg.USE_SYMMETRY: # add symmetry information from models_info.json\n meta_sym = bop_inout.load_json(model_params['models_info_path'], True)\n obj_symmetries = {0: []}\n for obj_id in models.keys():\n if obj_id == 0:\n continue\n elif self.dataset_path == cfg.LM_PATH and obj_id in [3, 7]:\n continue\n # always add identity as one of the correct poses\n obj_symmetries[obj_id] = [np.eye(4, dtype=np.float32)]\n # note: assumes that we have either one or the other -- no combination done here\n if 'symmetries_continuous' in meta_sym[obj_id]:\n # sample rotations about the given axis\n axis = 'xyz'[meta_sym[obj_id]['symmetries_continuous'][0]['axis'].index(1)]\n sym = np.eye(4, dtype=np.float32)\n sym[:3, 3] = np.asarray(meta_sym[obj_id]['symmetries_continuous'][0]['offset'])\n axis_symmetries = []\n for angle in range(cfg.SYMMETRY_AXIS_DELTA, 360, cfg.SYMMETRY_AXIS_DELTA):\n sym_step = sym.copy()\n sym_step[:3, :3] = Rotation.from_euler(axis, angle, degrees=True).as_matrix()\n axis_symmetries.append(sym_step)\n obj_symmetries[obj_id] += axis_symmetries\n elif 'symmetries_discrete' in meta_sym[obj_id]:\n obj_symmetries[obj_id] += [np.asarray(sym).reshape(4, 4).astype(np.float32)\n for sym in meta_sym[obj_id]['symmetries_discrete']]\n # pad to max number of symmetries (for default pytorch collate_fn) and get symmetry count (for retrieval)\n max_num_symmetries = max([len(syms) for syms in obj_symmetries.values()])\n symmetries = {}\n for obj_id, syms in obj_symmetries.items():\n num_symmetries = len(syms)\n symmetries[obj_id] = {\n 'symmetries': np.stack(syms\n + [np.eye(4, dtype=np.float32)] * (max_num_symmetries - num_symmetries)),\n 'num_symmetries': num_symmetries\n }\n else:\n symmetries = None\n\n return samples, models, symmetries\n\n # for visualization\n def get_rgb(self, scene_id, im_id):\n test_path = \"test\"\n scene_path = os.path.join(self.dataset_path, f\"{test_path}/{scene_id:06d}\")\n file_path = os.path.join(scene_path, f\"rgb/{im_id:06d}.png\")\n if os.path.exists(file_path):\n return bop_inout.load_im(file_path)[..., :3]/255\n else:\n print(f\"missing file: {file_path}\")\n return np.zeros((480, 640, 3), dtype=np.float32)\n\n def get_depth(self, scene_id, im_id, depth_scale=1.0):\n test_path = \"test\"\n scene_path = os.path.join(self.dataset_path, f\"{test_path}/{scene_id:06d}\")\n file_path = os.path.join(scene_path, f\"depth/{im_id:06d}.png\")\n if os.path.exists(file_path):\n return bop_inout.load_depth(file_path) * depth_scale\n else:\n print(f\"missing file: {file_path}\")\n return np.zeros((480, 640), dtype=np.float32)\n\n def get_normal(self, scene_id, im_id, depth_scale=1.0):\n test_path = \"test\"\n scene_path = os.path.join(self.dataset_path, f\"{test_path}/{scene_id:06d}\")\n file_path = os.path.join(scene_path, f\"normal/{im_id:06d}.tiff\")\n if os.path.exists(file_path):\n return skio.imread(file_path)\n else:\n print(f\"missing file: {file_path} -- trying to compute from depth\")\n basepath = \"/\".join(file_path.split(\"/\")[:-1])\n if not os.path.exists(basepath):\n os.mkdir(basepath)\n\n D = self.get_depth(scene_id, im_id, depth_scale)\n D_px = D.copy()\n # inpaint missing depth values\n D_px = cv.inpaint(D_px.astype(np.float32), np.uint8(D_px == 0), 3, cv.INPAINT_NS)\n # blur\n blur_size = (9, 9) if self.dataset_path == cfg.YCBV_PATH else (3, 3)\n D_px = cv.GaussianBlur(D_px, blur_size, sigmaX=10.0)\n # get derivatives\n kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n dzdx = 
cv.filter2D(D_px, -1, kernelx)\n dzdy = cv.filter2D(D_px, -1, kernely)\n # gradient ~ normal\n normal = np.dstack((dzdy, dzdx, D_px != 0.0)) # only where we have a depth value\n n = np.linalg.norm(normal, axis=2)\n n = np.dstack((n, n, n))\n normal = np.divide(normal, n, where=(n != 0))\n # remove invalid values\n normal[n == 0] = 0.0\n normal[D == 0] = 0.0\n # save normals for next use\n skio.imsave(file_path, normal)\n return normal\n\n def get_seg(self, scene_id, im_id, gt_id=-1):\n scene_path = os.path.join(self.dataset_path, f\"test/{scene_id:06d}\")\n if gt_id >= 0:\n file_path = os.path.join(scene_path, f\"mask_visib/{im_id:06d}_{gt_id:06d}.png\")\n if os.path.exists(file_path):\n return bop_inout.load_im(file_path)\n else:\n print(f\"missing file: {file_path}\")\n return np.zeros((480, 640), dtype=np.uint8)\n else:\n file_paths = sorted(glob.glob(os.path.join(scene_path, f\"mask_visib/{im_id:06d}_*.png\")))\n meta = bop_inout.load_json(os.path.join(self.dataset_path, f\"test/{scene_id:06d}/scene_gt.json\"))[str(im_id)]\n obj_ids = [info['obj_id'] for info in meta]\n masks = [bop_inout.load_im(file_path) for file_path in file_paths]\n labels = np.zeros((480, 640), dtype=np.uint8)\n for (mask, obj_id) in zip(masks, obj_ids):\n labels[mask > 0] = obj_id\n return labels\n\n\nclass DatasetYcbVideo(DatasetLinemod):\n\n def __init__(self, split, dataset_path=cfg.YCBV_PATH):\n super().__init__(split, dataset_path)\n\n def __getitem__(self, idx):\n frame_samples = self.samples[idx]\n if self.transforms:\n # same plane for all objects in the frame --> same error applied to GT for train and val\n if cfg.USE_CONTACT and self.split != 'test':\n # assert isinstance(self.transforms.transforms[-1], Transforms.RandomTransformSE3_plane)\n plane_err = self.transforms.transforms[-1].generate_transform_plane()\n for sample in frame_samples:\n sample['plane_err'] = plane_err\n\n frame_samples = [self.transforms(obj_sample.copy()) for obj_sample in frame_samples]\n return frame_samples\n\n def get_samples(self, split, subsample=0):\n # ============= GET MODELS ============\n model_type = \"eval_canonical\" if cfg.USE_CANONICAL else \"eval\"\n model_params = bop_dataset_params.get_model_params('/'.join(self.dataset_path.split('/')[:-1]),\n self.dataset_path.split('/')[-1], model_type)\n mesh_ids = model_params['obj_ids']\n\n # plane as 0 object: xy plane, z up\n plane_mm = 1000/np.sqrt(2) # scale in mm\n plane_samples = 4096\n plane = np.hstack([v.reshape(-1, 1)\n for v in\n np.meshgrid(*[np.linspace(-plane_mm / 2, plane_mm / 2, int(np.sqrt(plane_samples)))] * 2)]\n + [np.zeros((plane_samples, 3)), np.ones((plane_samples, 1))]).astype(np.float32)\n models = {0: plane}\n for mesh_id in mesh_ids:\n mesh = trimesh.load(os.path.join(self.dataset_path, f\"models_{model_type}/obj_{mesh_id:06d}.ply\"))\n pcd, face_indices = trimesh.sample.sample_surface_even(mesh, 4096)\n if pcd.shape[0] < 4096: # get additional samples if less were returned\n additional_samples = np.random.choice(np.arange(pcd.shape[0]), size=4096 - pcd.shape[0], replace=True)\n pcd = np.vstack([pcd, pcd[additional_samples]])\n face_indices = np.hstack([face_indices, face_indices[additional_samples]])\n models[mesh_id] = np.hstack([pcd, mesh.face_normals[face_indices]]).astype(np.float32)\n\n # ============= ADD SYMMETRY INFORMATION ============\n if cfg.USE_SYMMETRY: # add symmetry information from models_info.json\n from scipy.spatial.transform.rotation import Rotation\n info_path = model_params['models_info_path']\n info_path = 
info_path.replace('models_eval', 'models_eval_canonical') if not cfg.USE_CANONICAL else info_path\n meta_sym = bop_inout.load_json(info_path, True)\n obj_symmetries = {0: []}\n for obj_id in models.keys():\n if obj_id == 0:\n continue\n elif self.dataset_path == cfg.LM_PATH and obj_id in [3, 7]:\n continue\n # always add identity as one of the correct poses\n obj_symmetries[obj_id] = [np.eye(4, dtype=np.float32)]\n # note: assumes that we have either one or the other -- no combination done here\n if 'symmetries_continuous' in meta_sym[obj_id]:\n # sample rotations about the given axis\n axis = 'xyz'[meta_sym[obj_id]['symmetries_continuous'][0]['axis'].index(1)]\n sym = np.eye(4, dtype=np.float32)\n sym[:3, 3] = np.asarray(meta_sym[obj_id]['symmetries_continuous'][0]['offset'])\n axis_symmetries = []\n for angle in range(cfg.SYMMETRY_AXIS_DELTA, 360, cfg.SYMMETRY_AXIS_DELTA):\n sym_step = sym.copy()\n sym_step[:3, :3] = Rotation.from_euler(axis, angle, degrees=True).as_matrix()\n axis_symmetries.append(sym_step)\n obj_symmetries[obj_id] += axis_symmetries\n # cylinder -- both\n if 'symmetries_discrete' in meta_sym[obj_id]:\n syms = [np.asarray(sym).reshape(4, 4).astype(np.float32)\n for sym in meta_sym[obj_id]['symmetries_discrete']]\n assert len(syms) == 1\n up_down = [syms[0] @ axis_sym for axis_sym in axis_symmetries]\n obj_symmetries[obj_id] += up_down\n\n elif 'symmetries_discrete' in meta_sym[obj_id]:\n obj_symmetries[obj_id] += [np.asarray(sym).reshape(4, 4).astype(np.float32)\n for sym in meta_sym[obj_id]['symmetries_discrete']]\n # pad to max number of symmetries (for default pytorch collate_fn) and get symmetry count (for retrieval)\n max_num_symmetries = max([len(syms) for syms in obj_symmetries.values()])\n symmetries = {}\n for obj_id, syms in obj_symmetries.items():\n num_symmetries = len(syms)\n symmetries[obj_id] = {\n 'symmetries': np.stack(syms\n + [np.eye(4, dtype=np.float32)] * (max_num_symmetries - num_symmetries)),\n 'num_symmetries': num_symmetries\n }\n else:\n symmetries = None\n\n # ============= PREP CANONICAL INFO ==========\n # get GT to canonical model space trafos\n code_path = os.path.dirname(os.path.abspath(__file__))\n meta_canon = bop_inout.load_json(os.path.join(code_path, \"bop_models_meta.json\"), True)\n canonical_offsets = {}\n for obj_id in models.keys():\n if obj_id == 0:\n continue\n bop_to_canonical = np.eye(4, dtype=np.float32)\n bop_to_canonical[:3, :3] = np.asarray(meta_canon[obj_id]['R_to_canonical']).reshape(3, 3)\n bop_to_canonical[:3, 3] = np.asarray(meta_canon[obj_id]['t_to_canonical']).squeeze()\n canonical_offsets[obj_id] = np.linalg.inv(bop_to_canonical)\n if not cfg.USE_CANONICAL and cfg.USE_SYMMETRY: # adapt symmetries accordingly\n bop_symmetries = {}\n for obj_id, syms in symmetries.items():\n if obj_id == 0:\n continue\n to_canon = canonical_offsets[obj_id]\n\n bop_symmetries[obj_id] = {'num_symmetries': symmetries[obj_id]['num_symmetries']}\n canon_symmetries = []\n for sym in symmetries[obj_id]['symmetries']:\n sym = to_canon @ sym @ np.linalg.inv(to_canon)\n canon_symmetries.append(sym)\n bop_symmetries[obj_id]['symmetries'] = np.stack(canon_symmetries)\n symmetries = bop_symmetries\n\n # ============= GET DATASET SAMPLES ============\n samples_path = f\"sporeagent/{split}_posecnn_plane.pkl\" if split == \"test\" \\\n else f\"sporeagent/{split}_plane.pkl\"\n with open(os.path.join(self.dataset_path, samples_path), 'rb') as file:\n samples = pickle.load(file)\n if subsample > 0: # used for evaluation during training\n samples = 
samples[::subsample]\n # we precompute samples for 1/20th of the training set\n if self.split_name != \"test\":\n samples = samples[::5] # further subsample to 1/100th during training\n\n print(f\"preparing {len(samples)} {self.split_name} samples...\")\n pad_size = max([len(sample['obj_ids']) for sample in samples])\n per_frame_samples = []\n for sample in tqdm(samples):\n # prepare plane: optionally use provided plane pose for training (where given) and precompute normals\n if self.split != 'test' and 'cam_R_w2c' in sample['cam']: # use annotated supporting plane for training\n sample['plane'] = np.eye(4, dtype=np.float32)\n sample['plane'][:3, :3] = sample['cam']['cam_R_w2c']\n sample['plane'][:3, 3] = sample['cam']['cam_t_w2c'].squeeze()\n # else: pre-computed supporting plane (RANSAC) when not available and for testing\n if cfg.PRECOMPUTE_NORMALS: # for observed plane\n plane_src = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(sample['plane_pcd'][:, :3]))\n plane_src.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=50, max_nn=30))\n plane_src.orient_normals_to_align_with_direction([0, 0, -1])\n sample['plane_pcd'] = np.hstack(\n [sample['plane_pcd'][:, :3], np.asarray(plane_src.normals).astype(np.float32),\n sample['plane_pcd'][:, -1][:, None]]) # mask\n sample['plane_pcd'][:, -1] = sample['plane_pcd'][:, -1] == 0\n\n # unpack objects per frame\n num_objs = len(sample['obj_ids'])\n frame_samples = []\n for oi, obj_id in enumerate(sample['obj_ids']):\n m2c = np.eye(4, dtype=np.float32)\n m2c[:3, :3] = sample['gts'][oi]['cam_R_m2c']\n m2c[:3, 3] = sample['gts'][oi]['cam_t_m2c'].squeeze()\n\n if cfg.USE_CANONICAL: # adapt GT and est to canonical model space\n m2c = m2c @ canonical_offsets[obj_id]\n sample['ests'][oi] = sample['ests'][oi] @ canonical_offsets[obj_id]\n\n if cfg.PRECOMPUTE_NORMALS: # for observed objects\n src = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(sample['pcds'][oi][:, :3]))\n src.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=10, max_nn=30))\n src.orient_normals_to_align_with_direction([0, 0, -1])\n sample['pcds'][oi] = np.hstack(\n [sample['pcds'][oi][:, :3], np.asarray(src.normals).astype(np.float32),\n sample['pcds'][oi][:, -1][:, None]]) # mask\n\n obj_sample = {\n 'idx': len(per_frame_samples) * pad_size + oi,\n 'scene': sample['scene'], 'frame': sample['frame'], 'obj_id': sample['gts'][oi]['obj_id'],\n 'cam': sample['cam'], 'gt_m2c': m2c, 'est_m2c': sample['ests'][oi],\n 'plane_src': sample['plane_pcd'], 'plane_ref': models[0], 'plane_m2c': sample['plane'],\n 'num_frame_objects': num_objs,\n 'other_obj_ids': sample['obj_ids'][:oi] + sample['obj_ids'][oi + 1:] + [0] * (pad_size - num_objs),\n 'points_src': sample['pcds'][oi], 'points_ref': models[obj_id]\n }\n\n if symmetries is not None:\n obj_sample['symmetries'] = symmetries[obj_id]['symmetries']\n obj_sample['num_symmetries'] = symmetries[obj_id]['num_symmetries']\n\n frame_samples.append(obj_sample)\n per_frame_samples.append(frame_samples)\n return per_frame_samples, models, symmetries\n\n\ndef collate_data(data):\n if isinstance(data[0], list): # custom for per-frame samples (i.e., for YCBV)\n batch = []\n for frame in data:\n batch += frame\n batch = torch.utils.data.dataloader.default_collate(batch)\n # fix 'other_obj_ids'\n batch['other_obj_ids'] = torch.cat([other_obj_ids[:, None] for other_obj_ids in batch['other_obj_ids']], dim=1)\n else:\n batch = torch.utils.data.dataloader.default_collate(data)\n return batch\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "torch.cat", "numpy.linalg.inv", "numpy.uint8", "numpy.eye", "numpy.vstack", "numpy.arange", "numpy.linalg.norm", "numpy.dstack", "numpy.stack", "numpy.asarray", "numpy.ones", "scipy.spatial.transform.rotation.Rotation.from_euler", "numpy.array", "numpy.zeros", "numpy.divide", "torch.utils.data.dataloader.default_collate" ] ]
sharmasaravanan/openface-Verification
[ "e2471e827eee2c86e92861f5fd59affa04c725ba" ]
[ "openface/torch_neural_net.py" ]
[ "# Copyright 2015-2016 Carnegie Mellon University\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Module for Torch-based neural network usage.\"\"\"\r\n\r\nimport atexit\r\nimport binascii\r\nfrom subprocess import Popen, PIPE\r\nimport os\r\nimport os.path\r\nimport sys\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nmyDir = os.path.dirname(os.path.realpath(__file__))\r\n\r\n# Workaround for non-standard terminals, originally reported in\r\n# https://github.com/cmusatyalab/openface/issues/66\r\nos.environ['TERM'] = 'linux'\r\n\r\n\r\nclass TorchNeuralNet:\r\n \"\"\"Use a `Torch <http://torch.ch>`_ subprocess for feature extraction.\"\"\"\r\n\r\n #: The default Torch model to use.\r\n defaultModel = os.path.join(myDir, '..', 'models', 'openface', 'nn4.small2.v1.t7')\r\n\r\n def __init__(self, model=defaultModel, imgDim=96, cuda=False):\r\n \"\"\"__init__(self, model=defaultModel, imgDim=96, cuda=False)\r\n\r\n Instantiate a 'TorchNeuralNet' object.\r\n\r\n Starts `openface_server.lua\r\n <https://github.com/cmusatyalab/openface/blob/master/openface/openface_server.lua>`_\r\n as a subprocess.\r\n\r\n :param model: The path to the Torch model to use.\r\n :type model: str\r\n :param imgDim: The edge length of the square input image.\r\n :type imgDim: int\r\n :param cuda: Flag to use CUDA in the subprocess.\r\n :type cuda: bool\r\n \"\"\"\r\n assert model is not None\r\n assert imgDim is not None\r\n assert cuda is not None\r\n\r\n self.cmd = ['/usr/bin/env', 'th', os.path.join(myDir, 'openface_server.lua'),\r\n '-model', model, '-imgDim', str(imgDim)]\r\n if cuda:\r\n self.cmd.append('-cuda')\r\n self.p = Popen(self.cmd, stdin=PIPE, stdout=PIPE, bufsize=0)\r\n\r\n def exitHandler():\r\n if self.p.poll() is None:\r\n self.p.kill()\r\n atexit.register(exitHandler)\r\n\r\n def forwardPath(self, imgPath):\r\n \"\"\"\r\n Perform a forward network pass of an image on disk.\r\n\r\n :param imgPath: The path to the image.\r\n :type imgPath: str\r\n :return: Vector of features extracted with the neural network.\r\n :rtype: numpy.ndarray\r\n \"\"\"\r\n assert imgPath is not None\r\n\r\n rc = self.p.poll()\r\n if rc is not None and rc != 0:\r\n raise Exception(\"\"\"\r\n\r\n\r\nOpenFace: `openface_server.lua` subprocess has died.\r\n\r\n+ Is the Torch command `th` on your PATH? 
Check with `which th`.\r\n+ If `th` is on your PATH, try running `./util/profile-network.lua`\r\n to see if Torch can correctly load and run the network.\r\n If this gives illegal instruction errors, see the section on\r\n this in our FAQ at http://cmusatyalab.github.io/openface/faq/\r\n+ See this GitHub issue if you are running on\r\n a non-64-bit machine:\r\n https://github.com/cmusatyalab/openface/issues/42\r\n+ Please post further issues to our mailing list at\r\n https://groups.google.com/forum/#!forum/cmu-openface\r\n\r\nDiagnostic information:\r\n\r\ncmd: {}\r\n\r\n============\r\n\r\nstdout: {}\r\n\"\"\".format(self.cmd, self.p.stdout.read()))\r\n\r\n self.p.stdin.write(imgPath + \"\\n\")\r\n output = self.p.stdout.readline()\r\n try:\r\n rep = [float(x) for x in output.strip().split(',')]\r\n rep = np.array(rep)\r\n return rep\r\n except Exception as e:\r\n self.p.kill()\r\n stdout, stderr = self.p.communicate()\r\n print(\"\"\"\r\n\r\n\r\nError getting result from Torch subprocess.\r\n\r\nLine read: {}\r\n\r\nException:\r\n\r\n{}\r\n\r\n============\r\n\r\nstdout: {}\r\n\"\"\".format(output, str(e), stdout))\r\n sys.exit(-1)\r\n\r\n def forward(self, rgbImg):\r\n \"\"\"\r\n Perform a forward network pass of an RGB image.\r\n\r\n :param rgbImg: RGB image to process. Shape: (imgDim, imgDim, 3)\r\n :type rgbImg: numpy.ndarray\r\n :return: Vector of features extracted from the neural network.\r\n :rtype: numpy.ndarray\r\n \"\"\"\r\n assert rgbImg is not None\r\n\r\n t = '/tmp/openface-torchwrap-{}.png'.format(\r\n binascii.b2a_hex(os.urandom(8)))\r\n bgrImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)\r\n cv2.imwrite(t, bgrImg)\r\n rep = self.forwardPath(t)\r\n os.remove(t)\r\n return rep\r\n" ]
[ [ "numpy.array" ] ]
kerrywang/Advanced-Lane-Finding
[ "146fd169b9c9a0c58d2bd6103e147fdc4a26684d" ]
[ "lane_finding_pipeline/GradientFilter.py" ]
[ "from lane_finding_pipeline.piplineinterface import PipeLineInterface\nimport cv2\nimport numpy as np\nimport constant\nimport os\nclass GradientFilter(PipeLineInterface):\n def __init__(self, filter=[]):\n self.filters = filter\n\n def addFilter(self, filterClass):\n assert isinstance(filterClass, GradientFilter)\n self.filters.append(filterClass)\n\n def _findFilter(self, orien):\n avaibaleFilter = [filter_ for filter_ in self.filters if filter_.orientation == orien]\n if not avaibaleFilter:\n return None\n return avaibaleFilter[0]\n\n def process(self, image):\n xyMask = np.zeros(image.shape[:2])\n magDirMask = np.zeros(image.shape[:2])\n\n filterX = self._findFilter('x')\n filterY = self._findFilter('y')\n\n filterMag = self._findFilter('mag')\n filterDir = self._findFilter('dir')\n\n if filterX or filterY:\n gradX = filterX.process(image) if filterX else np.ones(image.shape[:2])\n gradY = filterY.process(image) if filterY else np.ones(image.shape[:2])\n xyMask[(gradX == 1) & (gradY == 1)] = 1\n\n if filterMag or filterDir:\n gradMag = filterMag.process(image) if filterMag else np.ones(image.shape[:2])\n gradDir = filterMag.process(image) if filterDir else np.ones(image.shape[:2])\n\n magDirMask[(gradMag == 1) & (gradDir == 1)] = 1\n\n resultBinary = np.zeros(image.shape[:2])\n resultBinary[(xyMask == 1) | (magDirMask == 1)] = 1\n return resultBinary\n\n\nclass SobelFilter(GradientFilter):\n def __init__(self, orient='x', sobel_kernel=3, thresh_min=0, thresh_max=255):\n self.orientation = orient\n self.threshMin = thresh_min\n self.threshMax = thresh_max\n self.kenelSize = sobel_kernel\n\n def process(self, image):\n # gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n sobelX = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=self.kenelSize)\n sobelY = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=self.kenelSize)\n if self.orientation == 'x':\n abs_sobel = np.absolute(sobelX)\n elif self.orientation == 'y':\n abs_sobel = np.absolute(sobelY)\n elif self.orientation == 'mag': # magnitude filter\n abs_sobel = np.sqrt(sobelX ** 2 + sobelY ** 2)\n else: # direction filter\n abs_sobel = np.arctan2(np.absolute(sobelY), np.absolute(sobelX))\n\n if self.orientation != 'dir':\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n else:\n scaled_sobel = abs_sobel\n\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= self.threshMin) & (scaled_sobel <= self.threshMax)] = 1\n return binary_output\n\nif __name__ == \"__main__\":\n gf = GradientFilter()\n\n gf.addFilter(SobelFilter(orient='x', thresh_min=0, thresh_max=255))\n gf.addFilter(SobelFilter(orient='y', thresh_min=0, thresh_max=255))\n gf.addFilter(SobelFilter(orient='mag', sobel_kernel=9, thresh_min=0, thresh_max=255))\n gf.addFilter(SobelFilter(orient='dir', thresh_min=0, thresh_max=np.pi/2))\n\n binary = gf.process(cv2.imread(os.path.join(constant.getTestImagesDir(), \"test1.jpg\")))\n cv2.imshow('masked', binary)\n cv2.waitKey(0)" ]
[ [ "numpy.absolute", "numpy.sqrt", "numpy.ones", "numpy.max", "numpy.zeros_like", "numpy.zeros" ] ]
liuweiping2020/advForNLP
[ "cb4d21ead7a05826999edec2e6745e5301c4a19c" ]
[ "src/run.py" ]
[ "# coding: UTF-8\nimport time\nimport torch\nimport numpy as np\nfrom trains.train_eval import train, init_network\nfrom importlib import import_module\nimport argparse\n\nparser = argparse.ArgumentParser(description='Chinese Text Classification')\nparser.add_argument('--model', type=str, required=True,\n help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')\nparser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')\nparser.add_argument('--word', default=False, type=bool, help='True for word, False for char')\nparser.add_argument('--epsilon', default=8 / 50, type=int)\nparser.add_argument('--alpha', default=2, type=int, help='Step size')\nparser.add_argument('--delta-init', default='zero', choices=['zero', 'random'],\n help='Perturbation initialization method')\nparser.add_argument('--attack-iters', default=7, type=int, help='Attack iterations')\nparser.add_argument('--delta_init', default='random', choices=['zero', 'random'],\n help='Perturbation initialization method')\nparser.add_argument('--sgdflag', default=\"fgsm\", type=str, help='choose a model: pgd,free,fgsm')\nparser.add_argument('--minibatch_replays', default=8, type=int)\n\nargs = parser.parse_args()\n\n\ndef doTrain():\n dataset = 'THUCNews' # 数据集\n # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random\n embedding = 'embedding_SougouNews.npz'\n if args.embedding == 'random':\n embedding = 'random'\n model_name = args.model # 'TextRCNN' # TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer\n if model_name == 'FastText':\n from commons.utils_fasttext import build_dataset, build_iterator, get_time_dif\n embedding = 'random'\n else:\n from utils import build_dataset, build_iterator, get_time_dif\n x = import_module('models.' + model_name)\n config = x.Config(dataset, embedding)\n np.random.seed(1)\n torch.manual_seed(1)\n torch.cuda.manual_seed_all(1)\n torch.backends.cudnn.deterministic = True # 保证每次结果一样\n start_time = time.time()\n print(\"Loading data...\")\n vocab, train_data, dev_data, test_data = build_dataset(config, args.word)\n train_iter = build_iterator(train_data, config)\n dev_iter = build_iterator(dev_data, config)\n test_iter = build_iterator(test_data, config)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n config.n_vocab = len(vocab)\n model = x.Model(config).to(config.device)\n if model_name != 'Transformer':\n init_network(model)\n print(model.parameters)\n config.epsilon = args.epsilon\n config.alpha = args.alpha\n config.attack_iters = args.attack_iters\n config.sgdflag = args.sgdflag\n config.delta_init = args.delta_init\n config.minibatch_replays = args.minibatch_replays\n train(config, model, train_iter, dev_iter, test_iter)\n\n\nif __name__ == '__main__':\n doTrain()\n\n pass\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.cuda.manual_seed_all" ] ]
gpaw789/weather_sim
[ "2b0137cc87c05d811771b5ffa413a477e685681d" ]
[ "master/GenerateWeather.py" ]
[ "import random\nimport datetime\nfrom time import sleep\nimport pickle\nimport helpers\nimport pandas as pd\n\n# structure\n# main() is text interface\n# build_position() is the function that builds the lat/long coodinates\n# running() is the function that runs while loop indefinitely and print the datastream\n# generate() is the function that generates the data\n\ndef generate(location, time, temperature, humidity):\n\n latlongele = helpers.geolocation(location)\n\n weather = [\"Rain\", \"Snow\", \"Sunny\"]\n # print data\n\n location_int = location\n lat = latlongele[0]\n long = latlongele[1]\n elevation = latlongele[2]\n position = \"{},{},{}\".format(lat,long,elevation)\n local_time = \"{}Z\".format(time.isoformat(timespec='seconds'))\n conditions_int = weather[random.randrange(3)]\n temperature = \"%+d\" % temperature\n pressure = helpers.pressure(elevation)\n humidity = humidity\n\n stream = \"{}|{}|{}|{}|{}|{}|{}\".format(location_int, position, local_time, conditions_int, temperature, pressure, humidity)\n\n return stream, float(temperature)\n\ndef build_position(positions_number):\n # purpose: for a given position number, ask user to type in each value\n # input: value in terms of string\n # output: e.g. [\"-100,200\", \"-500, 200\"]\n\n array_positions = [0]*int(positions_number)\n\n for i in range(0, len(array_positions)):\n long = input(\"Position {} Long: \".format(i))\n lat = input(\"Position {} Lat: \".format(i))\n foo = \"{},{}\".format(long,lat)\n array_positions[i] = foo\n\n return array_positions\n\n\n\ndef running(elements):\n # purpose: run the while loop forever, using variables\n # input: [variable1, variable2, variable3]\n # output: None\n\n # setting up values\n start_time_status = elements[0]\n data_per_second = elements[1]\n array_positions = elements[2]\n\n # examples of places\n places_list = [\"Sydney\", \"Melbourne\", \"Adelaide\", \"Perth\", \"Darwin\",\n \"Brisbane\", \"Hobart\", \"Alice Springs\", \"Cairns\", \"Newman\"]\n weather = [\"Rain\", \"Snow\", \"Sunny\"]\n\n # epoch time\n epoch = datetime.datetime(1970,1,1)\n i = datetime.datetime.now()\n seconds = int((i-epoch).total_seconds())\n\n # seed the initial values using neural network prediction\n array_input = []\n n = pickle.load(open(\"n_fit.p\", \"rb\"))\n for i in helpers.places:\n # feed_input is lat, long, ele dictionary\n feed_input = {\"lat\": helpers.places[i][0], \"long\": helpers.places[i][1], \"ele\": helpers.places[i][2]}\n array_input.append(feed_input)\n\n temperature = [n.predict(pd.DataFrame(feed,index=[0])) for feed in array_input]\n # temperature = [random.randrange(-20,50) for x in range(10)]\n humidity = [random.randrange(0, 100) for x in range(10)]\n\n # set the time\n time = datetime.datetime.now()\n\n # run loop indefinitely\n while True:\n for location in range(0, len(places_list)):\n stream, temp_output = generate(places_list[location], time, temperature[location], humidity[location])\n print(stream)\n\n # storing data and making the next variable prediction\n # temperature is limited to -20 and +50 C, and its move either up or down based on a random number generator\n # humidity is limited to 0 and 100 % and its move either up or down based on a random number generator\n time = datetime.datetime.now() + datetime.timedelta(hours=1)\n # temperature[location] = min(max(temp_output + round(random.uniform(-1,1), 2), -20), 50)\n seconds = int((time - epoch).total_seconds())\n feed_input = {\"lat\": helpers.places[places_list[location]][0],\n \"long\": 
helpers.places[places_list[location]][1],\n \"ele\": helpers.places[places_list[location]][2]}\n temperature[location] = n.predict(pd.DataFrame(feed_input,index=[0]))\n humidity[location] = min(max(humidity[location] + random.randrange(-1, 1, 1), 0), 100)\n sleep(1/data_per_second)\n\n return 0\n\n\ndef main():\n # purpose: Run through text to ask user questions\n # input: None\n # output: None\n\n try:\n print(\"Welcome! Key in the number for options\\n\")\n print(\"Would you like to customise? (1) Customise (2) Just show me what you got!\")\n custom_status = int(input(\"Enter Value: \"))\n if custom_status == 1:\n print(\"What is start time? (1) Now\\n\")\n start_time_status = int(input(\"Enter Value: \"))\n print(\"How many data per second?\\n\")\n data_per_second = int(input(\"Enter Value: \"))\n print(\"How many weather positions?\\n\")\n positions_number = int(input(\"Enter Value: \"))\n array_positions = build_position(positions_number)\n elements = [start_time_status, data_per_second, array_positions]\n running(elements)\n\n elif custom_status == 2:\n start_time_status = 1; data_per_second = 2; array_positions = [0]\n elements = [start_time_status, data_per_second, array_positions]\n running(elements)\n\n else:\n print(\"Invalid Number, Try again\\n\")\n main()\n return 0\n except KeyboardInterrupt: # press ctrl + c to cancel\n pass\n\n\n return 0\n\n\nmain()" ]
[ [ "pandas.DataFrame" ] ]
danielkelshaw/ConcreteDropout
[ "65be53d3ecf558992f1c473c90e206717946897b" ]
[ "tests/test_concrete_dropout.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom condrop.concrete_dropout import ConcreteDropout\n\n\nclass TestConcreteDropout:\n\n def test_forward(self):\n\n to_pass = torch.ones(5)\n\n cd = ConcreteDropout(\n weight_regulariser=1e-6,\n dropout_regulariser=1e-3\n )\n\n linear_layer = nn.Linear(5, 3)\n ret_tensor = cd(to_pass, linear_layer)\n\n assert isinstance(ret_tensor, torch.Tensor)\n assert ret_tensor.numel() == 3\n\n assert isinstance(cd.regularisation, Tensor)\n assert cd.regularisation.numel() == 1\n" ]
[ [ "torch.nn.Linear", "torch.ones" ] ]
bblais/pyndamics
[ "fc1552af4bd07ed36412c0455981bae050179ad7" ]
[ "examples/Reproduce ODE API.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('pylab', 'inline')\n\n\n# ## Reproducing PyMC3 ODE_API_introduction\n# \n# https://github.com/pymc-devs/pymc3/blob/master/docs/source/notebooks/ODE_API_introduction.ipynb\n# \n# text for the equations taken from this original. All simulations done with emcee and pyndamics.\n\n# In[2]:\n\n\nfrom scipy.integrate import odeint\n\n\n# In[3]:\n\n\nfrom pyndamics import Simulation\nfrom pyndamics.emcee import *\n\n\n# # A Differential Equation For Freefall\n# \n# An object of mass $m$ is brought to some height and allowed to fall freely until it reaches the ground. A differential equation describing the object's speed over time is \n# \n# $$ y' = mg - \\gamma y $$\n# \n# The force the object experiences in the downwards direction is $mg$, while the force the object experiences in the opposite direction (due to air resistance) is proportional to how fast the object is presently moving. Let's assume the object starts from rest (that is, that the object's inital velocity is 0). This may or may not be the case. To showcase how to do inference on intial conditions, I will first assume the object starts from rest, and then relax that assumption later.\n# \n# Data on this object's speed as a function of time is shown below. The data may be noisy because of our measurement tools, or because the object is an irregular shape, thus leading to times during freefall when the object is more/less aerodynamic. Let's use this data to estimate the proportionality constant for air resistance.\n# \n# \n\n# In[4]:\n\n\n# For reproducibility\nnp.random.seed(20394)\n\ndef freefall(y, t, p): \n return 2.0*p[1] - p[0]*y[0]\n\n# Times for observation\ntimes = np.arange(0,10,0.5)\ngamma,g, y0, sigma = 0.4, 9.8, -2, 2\ny = odeint(freefall, t=times, y0=y0, args=tuple([[gamma,g]]))\nyobs = np.random.normal(y,2)\n\nfig, ax = plt.subplots(dpi=120)\nplt.plot(times,yobs, label='observed speed', linestyle='dashed', marker='o', color='red')\nplt.plot(times,y, label='True speed', color='k', alpha=0.5)\nplt.legend()\nplt.xlabel('Time (Seconds)')\nplt.ylabel(r'$y(t)$');\nplt.show()\n\n\n# ```python\n# ode_model = DifferentialEquation(\n# func=freefall,\n# times=times,\n# n_states=1, n_theta=2,\n# t0=0\n# )\n# \n# with pm.Model() as model:\n# # Specify prior distributions for soem of our model parameters\n# sigma = pm.HalfCauchy('sigma',1) \n# gamma = pm.Lognormal('gamma',0,1)\n# \n# # If we know one of the parameter values, we can simply pass the value.\n# ode_solution = ode_model(y0=[0], theta=[gamma, 9.8])\n# # The ode_solution has a shape of (n_times, n_states)\n# \n# Y = pm.Normal('Y', mu=ode_solution, sd=sigma, observed=yobs)\n# \n# prior = pm.sample_prior_predictive()\n# trace = pm.sample(2000, tune=1000, cores=1)\n# posterior_predictive = pm.sample_posterior_predictive(trace)\n# \n# data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)\n# ```\n\n# In[5]:\n\n\nsim=Simulation()\nsim.add(\"y' = m*g - γ*y\",0)\nsim.params(m=2,g=9.8,γ=0.4)\nsim.add_data(t=times,y=yobs)\nsim.run(0,10)\n\n\n# In[6]:\n\n\nt,y=sim.t,sim.y\nplot(t,y)\n\nt,y=sim.components[0].data['t'],sim.components[0].data['value']\nplot(t,y,'o')\n\n\n# In[7]:\n\n\nmodel=MCMCModel(sim,\n _sigma_y=Uniform(0.1,5),\n γ=Uniform(0,1),\n )\n\n\n# In[8]:\n\n\nmodel.run_mcmc(300,repeat=2)\nmodel.plot_chains()\n\n\n# In[9]:\n\n\nmodel.plot_distributions()\n\n\n# In[10]:\n\n\nmodel.plot_many(0,10,'y')\n\n\n# In[11]:\n\n\nmodel=MCMCModel(sim,\n 
_sigma_y=HalfCauchy(1),\n γ=LogNormal(0,1),\n )\n\n\n# In[12]:\n\n\nmodel.run_mcmc(300,repeat=2)\nmodel.plot_chains()\n\n\n# In[13]:\n\n\nmodel.plot_distributions()\n\n\n# In[14]:\n\n\nmodel.plot_many(0,10,'y')\n\n\n# ```python\n# with pm.Model() as model3: \n# sigma = pm.HalfCauchy('sigma',1)\n# gamma = pm.Lognormal('gamma',0,1)\n# g = pm.Lognormal('g',pm.math.log(10),2)\n# # Initial condition prior. We think it is at rest, but will allow for perturbations in initial velocity.\n# y0 = pm.Normal('y0', 0, 2)\n# \n# ode_solution = ode_model(y0=[y0], theta=[gamma, g])\n# \n# Y = pm.Normal('Y', mu=ode_solution, sd=sigma, observed=yobs)\n# \n# prior = pm.sample_prior_predictive()\n# trace = pm.sample(2000, tune=1000, target_accept=0.9, cores=1)\n# posterior_predictive = pm.sample_posterior_predictive(trace)\n# \n# data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)\n# ```\n\n# In[15]:\n\n\nmodel=MCMCModel(sim,\n γ=LogNormal(0,1),\n g=LogNormal(log(10),2),\n initial_y=Normal(0,2),\n )\n\n\n# In[16]:\n\n\nmodel.run_mcmc(300,repeat=2)\nmodel.plot_chains()\n\n\n# In[17]:\n\n\nmodel.plot_distributions()\n\n\n# In[18]:\n\n\nmodel.plot_many(0,10,'y')\n\n\n# ## SIR Model\n\n# In[19]:\n\n\ndef SIR(y, t, p):\n ds = -p[0]*y[0]*y[1]\n di = p[0]*y[0]*y[1] - p[1]*y[1] \n return [ds, di]\n\ntimes = np.arange(0,5,0.25)\n\nbeta,gamma = 4,1.0\n# Create true curves\ny = odeint(SIR, t=times, y0=[0.99, 0.01], args=((beta,gamma),), rtol=1e-8)\n# Observational model. Lognormal likelihood isn't appropriate, but we'll do it anyway\nyobs = np.random.lognormal(mean=np.log(y[1::]), sigma=[0.2, 0.3])\n\n\nplt.plot(times[1::],yobs, marker='o', linestyle='none')\nplt.plot(times, y[:,0], color='C0', alpha=0.5, label=f'$S(t)$')\nplt.plot(times, y[:,1], color ='C1', alpha=0.5, label=f'$I(t)$')\nplt.legend()\nplt.show()\n\n\n# ```python\n# sir_model = DifferentialEquation(\n# func=SIR, \n# times=np.arange(0.25, 5, 0.25), \n# n_states=2,\n# n_theta=2,\n# t0=0,\n# )\n# \n# with pm.Model() as model4: \n# sigma = pm.HalfCauchy('sigma', 1, shape=2)\n# \n# # R0 is bounded below by 1 because we see an epidemic has occured\n# R0 = pm.Bound(pm.Normal, lower=1)('R0', 2,3)\n# lam = pm.Lognormal('lambda',pm.math.log(2),2)\n# beta = pm.Deterministic('beta', lam*R0)\n# \n# sir_curves = sir_model(y0=[0.99, 0.01], theta=[beta, lam])\n# \n# Y = pm.Lognormal('Y', mu=pm.math.log(sir_curves), sd=sigma, observed=yobs)\n# \n# prior = pm.sample_prior_predictive()\n# trace = pm.sample(2000,tune=1000, target_accept=0.9, cores=1)\n# posterior_predictive = pm.sample_posterior_predictive(trace)\n# \n# data = az.from_pymc3(trace=trace, prior = prior, posterior_predictive = posterior_predictive)\n# ```\n\n# In[20]:\n\n\nsim=Simulation()\nsim.add(\"S'= -β*S*I\",0.99,plot=1)\nsim.add(\"I'= +β*S*I - γ*I\",0.01,plot=1)\nsim.params(β=4,γ=1)\nsim.add_data(t=times[1:],S=yobs[:,0],plot=1)\nsim.add_data(t=times[1:],I=yobs[:,1],plot=1)\nsim.run(0,5)\n\n\n# In[21]:\n\n\nmodel=MCMCModel(sim, # _sigma_S and _sigma_I default to Jeffreys\n β=Normal(0,20,all_positive=True),\n γ=Normal(0,20,all_positive=True),\n )\n\n\n# In[22]:\n\n\nmodel.run_mcmc(300,repeat=2)\nmodel.plot_chains()\n\n\n# In[23]:\n\n\nmodel.plot_distributions()\n\n\n# In[24]:\n\n\nmodel.plot_many(0,5,['S','I'])\n\n\n# In[25]:\n\n\nR0=model.eval('β/γ')\n\n\n# In[26]:\n\n\nmodel.plot_distributions(R0=R0)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "scipy.integrate.odeint" ] ]
RamanLab/iCOMIC
[ "1310bd51641ce28d4193fa21a002767ca434fc23" ]
[ "icomic/deseq_tsv.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\nimport pandas as pd\nimport os\n\ntest_dir = './results/em_results'\n\nsample = []\nunit = []\ncondition = []\n\nfor file in os.listdir(test_dir):\n if file.endswith(\".counts\"):\n sample.append(file)\n else:\n pass\n \n\n\nfor i in range(len(sample)):\n base = os.path.splitext(sample[i])[0]\n units = base.split('_')[1] + \"_\" + base.split('_')[2]\n unit.append(units)\n condition.append(base.split('_')[1])\n\n \nunits_data = pd.DataFrame({'sample': sample,\n 'unit': unit,\n 'condition': condition})\nunits_data.to_csv('./results/em_results/emtable.tsv', sep = '\\t', index=False)" ]
[ [ "pandas.DataFrame" ] ]
poposca/digit_classifier
[ "63d1515b576b6f984fcf1eea5c6a4d6bc040be16" ]
[ "tensorflow_examples/lite/model_maker/core/task/configs.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Configurations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_examples.lite.model_maker.core import compat\nfrom tensorflow_examples.lite.model_maker.core.api import mm_export\n\nDEFAULT_QUANTIZATION_STEPS = 2000\n\n\ndef _get_representative_dataset_gen(dataset, num_steps):\n \"\"\"Gets the function that generates representative dataset for quantized.\"\"\"\n\n def representative_dataset_gen():\n \"\"\"Generates representative dataset for quantized.\"\"\"\n if compat.get_tf_behavior() == 2:\n for image, _ in dataset.take(num_steps):\n yield [image]\n else:\n iterator = tf.compat.v1.data.make_initializable_iterator(\n dataset.take(num_steps))\n next_element = iterator.get_next()\n with tf.compat.v1.Session() as sess:\n sess.run(iterator.initializer)\n while True:\n try:\n image, _ = sess.run(next_element)\n yield [image]\n except tf.errors.OutOfRangeError:\n break\n\n return representative_dataset_gen\n\n\n@mm_export('config.QuantizationConfig')\nclass QuantizationConfig(object):\n \"\"\"Configuration for post-training quantization.\n\n Refer to\n https://www.tensorflow.org/lite/performance/post_training_quantization\n for different post-training quantization options.\n \"\"\"\n\n def __init__(\n self,\n optimizations=None,\n representative_data=None,\n quantization_steps=None,\n inference_input_type=None,\n inference_output_type=None,\n supported_ops=None,\n supported_types=None,\n experimental_new_quantizer=None,\n ):\n \"\"\"Constructs QuantizationConfig.\n\n Args:\n optimizations: A list of optimizations to apply when converting the model.\n If not set, use `[Optimize.DEFAULT]` by default.\n representative_data: A DataLoader holding representative data for\n post-training quantization.\n quantization_steps: Number of post-training quantization calibration steps\n to run.\n inference_input_type: Target data type of real-number input arrays. Allows\n for a different type for input arrays. Defaults to None. If set, must be\n be `{tf.float32, tf.uint8, tf.int8}`.\n inference_output_type: Target data type of real-number output arrays.\n Allows for a different type for output arrays. Defaults to None. If set,\n must be `{tf.float32, tf.uint8, tf.int8}`.\n supported_ops: Set of OpsSet options supported by the device. Used to Set\n converter.target_spec.supported_ops.\n supported_types: List of types for constant values on the target device.\n Supported values are types exported by lite.constants. Frequently, an\n optimization choice is driven by the most compact (i.e. 
smallest) type\n in this list (default [constants.FLOAT]).\n experimental_new_quantizer: Whether to enable experimental new quantizer.\n \"\"\"\n\n if optimizations is None:\n optimizations = [tf.lite.Optimize.DEFAULT]\n if not isinstance(optimizations, list):\n optimizations = [optimizations]\n self.optimizations = optimizations\n\n self.representative_data = representative_data\n if self.representative_data is not None and quantization_steps is None:\n quantization_steps = DEFAULT_QUANTIZATION_STEPS\n self.quantization_steps = quantization_steps\n\n self.inference_input_type = inference_input_type\n self.inference_output_type = inference_output_type\n\n if supported_ops is not None and not isinstance(supported_ops, list):\n supported_ops = [supported_ops]\n self.supported_ops = supported_ops\n\n if supported_types is not None and not isinstance(supported_types, list):\n supported_types = [supported_types]\n self.supported_types = supported_types\n\n self.experimental_new_quantizer = experimental_new_quantizer\n\n @classmethod\n def create_dynamic_range_quantization(cls,\n optimizations=tf.lite.Optimize.DEFAULT):\n \"\"\"Creates configuration for dynamic range quantization.\"\"\"\n return QuantizationConfig(optimizations)\n\n @classmethod\n def create_full_integer_quantization(\n cls,\n representative_data,\n quantization_steps=DEFAULT_QUANTIZATION_STEPS,\n optimizations=tf.lite.Optimize.DEFAULT,\n inference_input_type=tf.uint8,\n inference_output_type=tf.uint8,\n is_integer_only=False):\n \"\"\"Creates configuration for full integer quantization.\n\n Args:\n representative_data: Representative data used for post-training\n quantization.\n quantization_steps: Number of post-training quantization calibration steps\n to run.\n optimizations: A list of optimizations to apply when converting the model.\n If not set, use `[Optimize.DEFAULT]` by default.\n inference_input_type: Target data type of real-number input arrays. Used\n only when `is_integer_only` is True. Must be in `{tf.uint8, tf.int8}`.\n inference_output_type: Target data type of real-number output arrays. Used\n only when `is_integer_only` is True. Must be in `{tf.uint8, tf.int8}`.\n is_integer_only: If True, enforces full integer quantization for all ops\n including the input and output. 
If False, uses integer with float\n fallback (using default float input/output) that mean to fully integer\n quantize a model, but use float operators when they don't have an\n integer implementation.\n\n Returns:\n QuantizationConfig.\n \"\"\"\n if not is_integer_only:\n return QuantizationConfig(\n optimizations,\n representative_data=representative_data,\n quantization_steps=quantization_steps)\n else:\n if inference_input_type not in [tf.uint8, tf.int8]:\n raise ValueError('For integer only quantization, '\n '`inference_input_type` '\n 'should be tf.uint8 or tf.int8.')\n if inference_output_type not in [tf.uint8, tf.int8]:\n raise ValueError('For integer only quantization, '\n '`inference_output_type` '\n 'should be tf.uint8 or tf.int8.')\n\n return QuantizationConfig(\n optimizations,\n representative_data=representative_data,\n quantization_steps=quantization_steps,\n inference_input_type=inference_input_type,\n inference_output_type=inference_output_type,\n supported_ops=[tf.lite.OpsSet.TFLITE_BUILTINS_INT8])\n\n @classmethod\n def create_float16_quantization(cls, optimizations=tf.lite.Optimize.DEFAULT):\n \"\"\"Creates configuration for float16 quantization.\"\"\"\n return QuantizationConfig(optimizations, supported_types=[tf.float16])\n\n def get_converter_with_quantization(self, converter, **kwargs):\n \"\"\"Gets TFLite converter with settings for quantization.\"\"\"\n converter.optimizations = self.optimizations\n\n if self.representative_data is not None:\n ds = self.representative_data.gen_dataset(\n batch_size=1, is_training=False, **kwargs)\n converter.representative_dataset = tf.lite.RepresentativeDataset(\n _get_representative_dataset_gen(ds, self.quantization_steps))\n\n if self.inference_input_type:\n converter.inference_input_type = self.inference_input_type\n if self.inference_output_type:\n converter.inference_output_type = self.inference_output_type\n if self.supported_ops:\n converter.target_spec.supported_ops = self.supported_ops\n if self.supported_types:\n converter.target_spec.supported_types = self.supported_types\n\n if self.experimental_new_quantizer is not None:\n converter.experimental_new_quantizer = self.experimental_new_quantizer\n return converter\n" ]
[ [ "tensorflow.compat.v1.Session" ] ]
highlight0112/pyscf
[ "4afbd42bad3e72db5bb94d8cacf1d5de76537bdd", "4afbd42bad3e72db5bb94d8cacf1d5de76537bdd" ]
[ "pyscf/tdscf/rks.py", "pyscf/grad/casscf.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n# Ref:\n# Chem Phys Lett, 256, 454\n# J. Mol. Struct. THEOCHEM, 914, 3\n#\n\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import symm\nfrom pyscf.dft import numint\nfrom pyscf.tdscf import rhf\nfrom pyscf.scf import hf_symm\nfrom pyscf.ao2mo import _ao2mo\nfrom pyscf.data import nist\nfrom pyscf.soscf.newton_ah import _gen_rhf_response\nfrom pyscf import __config__\n\n# Low excitation filter to avoid numerical instability\nPOSTIVE_EIG_THRESHOLD = getattr(__config__, 'tdscf_rhf_TDDFT_positive_eig_threshold', 1e-3)\n\n\nclass TDA(rhf.TDA):\n def nuc_grad_method(self):\n from pyscf.grad import tdrks\n return tdrks.Gradients(self)\n\nclass TDDFT(rhf.TDHF):\n def nuc_grad_method(self):\n from pyscf.grad import tdrks\n return tdrks.Gradients(self)\n\nRPA = TDRKS = TDDFT\n\nclass TDDFTNoHybrid(TDA):\n ''' Solve (A-B)(A+B)(X+Y) = (X+Y)w^2\n '''\n def gen_vind(self, mf):\n wfnsym = self.wfnsym\n singlet = self.singlet\n\n mol = mf.mol\n mo_coeff = mf.mo_coeff\n assert(mo_coeff.dtype == numpy.double)\n mo_energy = mf.mo_energy\n mo_occ = mf.mo_occ\n nao, nmo = mo_coeff.shape\n occidx = numpy.where(mo_occ==2)[0]\n viridx = numpy.where(mo_occ==0)[0]\n nocc = len(occidx)\n nvir = len(viridx)\n orbv = mo_coeff[:,viridx]\n orbo = mo_coeff[:,occidx]\n\n if wfnsym is not None and mol.symmetry:\n if isinstance(wfnsym, str):\n wfnsym = symm.irrep_name2id(mol.groupname, wfnsym)\n wfnsym = wfnsym % 10 # convert to D2h subgroup\n orbsym = hf_symm.get_orbsym(mol, mo_coeff) % 10\n sym_forbid = (orbsym[occidx,None] ^ orbsym[viridx]) != wfnsym\n\n e_ia = (mo_energy[viridx].reshape(-1,1) - mo_energy[occidx]).T\n if wfnsym is not None and mol.symmetry:\n e_ia[sym_forbid] = 0\n d_ia = numpy.sqrt(e_ia).ravel()\n ed_ia = e_ia.ravel() * d_ia\n hdiag = e_ia.ravel() ** 2\n\n vresp = _gen_rhf_response(mf, singlet=singlet, hermi=1)\n\n def vind(zs):\n nz = len(zs)\n dmov = numpy.empty((nz,nao,nao))\n for i, z in enumerate(zs):\n # *2 for double occupancy\n dm = reduce(numpy.dot, (orbo, (d_ia*z).reshape(nocc,nvir)*2, orbv.T))\n dmov[i] = dm + dm.T # +cc for A+B and K_{ai,jb} in A == K_{ai,bj} in B\n v1ao = vresp(dmov)\n v1ov = _ao2mo.nr_e2(v1ao, mo_coeff, (0,nocc,nocc,nmo)).reshape(-1,nocc*nvir)\n for i, z in enumerate(zs):\n # numpy.sqrt(e_ia) * (e_ia*d_ia*z + v1ov)\n v1ov[i] += ed_ia*z\n v1ov[i] *= d_ia\n return v1ov.reshape(nz,-1)\n\n return vind, hdiag\n\n def kernel(self, x0=None, nstates=None):\n '''TDDFT diagonalization solver\n '''\n mf = self._scf\n if mf._numint.libxc.is_hybrid_xc(mf.xc):\n raise RuntimeError('%s cannot be used with hybrid functional'\n % self.__class__)\n self.check_sanity()\n self.dump_flags()\n if nstates is None:\n nstates = self.nstates\n else:\n self.nstates = nstates\n\n log = lib.logger.Logger(self.stdout, self.verbose)\n\n vind, hdiag = self.gen_vind(self._scf)\n precond = 
self.get_precond(hdiag)\n if x0 is None:\n x0 = self.init_guess(self._scf, self.nstates)\n\n def pickeig(w, v, nroots, envs):\n idx = numpy.where(w > POSTIVE_EIG_THRESHOLD**2)[0]\n return w[idx], v[:,idx], idx\n\n self.converged, w2, x1 = \\\n lib.davidson1(vind, x0, precond,\n tol=self.conv_tol,\n nroots=nstates, lindep=self.lindep,\n max_space=self.max_space, pick=pickeig,\n verbose=log)\n\n mo_energy = self._scf.mo_energy\n mo_occ = self._scf.mo_occ\n occidx = numpy.where(mo_occ==2)[0]\n viridx = numpy.where(mo_occ==0)[0]\n e_ia = (mo_energy[viridx,None] - mo_energy[occidx]).T\n e_ia = numpy.sqrt(e_ia)\n def norm_xy(w, z):\n zp = e_ia * z.reshape(e_ia.shape)\n zm = w/e_ia * z.reshape(e_ia.shape)\n x = (zp + zm) * .5\n y = (zp - zm) * .5\n norm = lib.norm(x)**2 - lib.norm(y)**2\n norm = numpy.sqrt(.5/norm) # normalize to 0.5 for alpha spin\n return (x*norm, y*norm)\n\n idx = numpy.where(w2 > POSTIVE_EIG_THRESHOLD**2)[0]\n self.e = numpy.sqrt(w2[idx])\n self.xy = [norm_xy(self.e[i], x1[i]) for i in idx]\n\n if self.chkfile:\n lib.chkfile.save(self.chkfile, 'tddft/e', self.e)\n lib.chkfile.save(self.chkfile, 'tddft/xy', self.xy)\n\n log.note('Excited State energies (eV)\\n%s', self.e * nist.HARTREE2EV)\n return self.e, self.xy\n\n def nuc_grad_method(self):\n from pyscf.grad import tdrks\n return tdrks.Gradients(self)\n\n\nclass dRPA(TDDFTNoHybrid):\n def __init__(self, mf):\n if not getattr(mf, 'xc', None):\n raise RuntimeError(\"direct RPA can only be applied with DFT; for HF+dRPA, use .xc='hf'\")\n from pyscf import scf\n mf = scf.addons.convert_to_rhf(mf)\n mf.xc = ''\n TDDFTNoHybrid.__init__(self, mf)\n\nTDH = dRPA\n\nclass dTDA(TDA):\n def __init__(self, mf):\n if not getattr(mf, 'xc', None):\n raise RuntimeError(\"direct TDA can only be applied with DFT; for HF+dTDA, use .xc='hf'\")\n from pyscf import scf\n mf = scf.addons.convert_to_rhf(mf)\n mf.xc = ''\n TDA.__init__(self, mf)\n\nfrom pyscf import dft\ndft.rks.RKS.TDA = dft.rks_symm.RKS.TDA = lib.class_as_method(TDA)\ndft.rks.RKS.TDHF = dft.rks_symm.RKS.TDHF = None\ndft.rks.RKS.TDDFT = dft.rks_symm.RKS.TDDFT = lib.class_as_method(TDDFT)\ndft.rks.RKS.TDDFTNoHybrid = dft.rks_symm.RKS.TDDFTNoHybrid = lib.class_as_method(TDDFTNoHybrid)\ndft.rks.RKS.dTDA = dft.rks_symm.RKS.dTDA = lib.class_as_method(dTDA)\ndft.rks.RKS.dRPA = dft.rks_symm.RKS.dRPA = lib.class_as_method(dRPA)\ndft.roks.ROKS.TDA = dft.rks_symm.ROKS.TDA = None\ndft.roks.ROKS.TDHF = dft.rks_symm.ROKS.TDHF = None\ndft.roks.ROKS.TDDFT = dft.rks_symm.ROKS.TDDFT = None\ndft.roks.ROKS.TDDFTNoHybrid = dft.rks_symm.ROKS.TDDFTNoHybrid = None\ndft.roks.ROKS.dTDA = dft.rks_symm.ROKS.dTDA = None\ndft.roks.ROKS.dRPA = dft.rks_symm.ROKS.dRPA = None\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import dft\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n\n mol.atom = [\n ['H' , (0. , 0. , .917)],\n ['F' , (0. , 0. 
, 0.)], ]\n mol.basis = '631g'\n mol.build()\n\n mf = dft.RKS(mol)\n mf.xc = 'lda, vwn_rpa'\n mf.scf()\n td = mf.TDDFTNoHybrid()\n #td.verbose = 5\n td.nstates = 5\n print(td.kernel()[0] * 27.2114)\n# [ 9.74227238 9.74227238 14.85153818 30.35019348 30.35019348]\n td.singlet = False\n print(td.kernel()[0] * 27.2114)\n# [ 9.08754045 9.08754045 12.48375957 29.66870808 29.66870808]\n\n mf = dft.RKS(mol)\n mf.xc = 'b88,p86'\n mf.scf()\n td = mf.TDDFT()\n td.nstates = 5\n #td.verbose = 5\n print(td.kernel()[0] * 27.2114)\n# [ 9.82204435 9.82204435 15.0410193 30.01373062 30.01373062]\n td.singlet = False\n print(td.kernel()[0] * 27.2114)\n# [ 9.09322358 9.09322358 12.29843139 29.26731075 29.26731075]\n\n mf = dft.RKS(mol)\n mf.xc = 'lda,vwn'\n mf.scf()\n td = mf.TDA()\n print(td.kernel()[0] * 27.2114)\n# [ 9.68872769 9.68872769 15.07122478]\n td.singlet = False\n #td.verbose = 5\n print(td.kernel()[0] * 27.2114)\n# [ 9.0139312 9.0139312 12.42444659]\n\n mf = dft.RKS(mol)\n mf.xc = 'lda,vwn'\n mf.scf()\n td = dRPA(mf)\n td.nstates = 5\n print(td.kernel()[0] * 27.2114)\n# [ 10.00343861 10.00343861 15.62586305 30.69238874 30.69238874]\n\n mf = dft.RKS(mol)\n mf.xc = 'lda,vwn'\n mf.scf()\n td = dTDA(mf)\n td.nstates = 5\n print(td.kernel()[0] * 27.2114)\n# [ 10.05245288 10.05245288 16.03497655 30.7120363 30.7120363 ]\n\n", "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nCASSCF analytical nuclear gradients\n\nRef.\nJ. Comput. Chem., 5, 589\n'''\n\nimport time\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import ao2mo\nfrom pyscf.lib import logger\nfrom pyscf.grad import rhf as rhf_grad\nfrom pyscf.grad.mp2 import _shell_prange\n\n\ndef kernel(mc, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,\n verbose=None):\n if mo_coeff is None: mo_coeff = mc.mo_coeff\n if ci is None: ci = mc.ci\n if mf_grad is None: mf_grad = mc._scf.nuc_grad_method()\n if mc.frozen is not None:\n raise NotImplementedError\n\n mol = mc.mol\n ncore = mc.ncore\n ncas = mc.ncas\n nocc = ncore + ncas\n nelecas = mc.nelecas\n nao, nmo = mo_coeff.shape\n nao_pair = nao * (nao+1) // 2\n\n mo_occ = mo_coeff[:,:nocc]\n mo_core = mo_coeff[:,:ncore]\n mo_cas = mo_coeff[:,ncore:nocc]\n\n casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)\n\n# gfock = Generalized Fock, Adv. Chem. 
Phys., 69, 63\n dm_core = numpy.dot(mo_core, mo_core.T) * 2\n dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))\n aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_occ, mo_cas), compact=False)\n aapa = aapa.reshape(ncas,ncas,nocc,ncas)\n vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))\n h1 = mc.get_hcore()\n vhf_c = vj[0] - vk[0] * .5\n vhf_a = vj[1] - vk[1] * .5\n gfock = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c + vhf_a, mo_occ)) * 2\n gfock[:,ncore:nocc] = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c, mo_cas, casdm1))\n gfock[:,ncore:nocc] += numpy.einsum('uviw,vuwt->it', aapa, casdm2)\n dme0 = reduce(numpy.dot, (mo_occ, (gfock+gfock.T)*.5, mo_occ.T))\n aapa = vj = vk = vhf_c = vhf_a = h1 = gfock = None\n\n dm1 = dm_core + dm_cas\n vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas))\n hcore_deriv = mf_grad.hcore_generator(mol)\n s1 = mf_grad.get_ovlp(mol)\n\n diag_idx = numpy.arange(nao)\n diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx\n casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)\n dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,\n (0, nao, 0, nao)).reshape(ncas**2,nao,nao)\n dm2buf = lib.pack_tril(dm2buf)\n dm2buf[:,diag_idx] *= .5\n dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)\n casdm2 = casdm2_cc = None\n\n if atmlst is None:\n atmlst = range(mol.natm)\n aoslices = mol.aoslice_by_atom()\n de = numpy.zeros((len(atmlst),3))\n\n max_memory = mc.max_memory - lib.current_memory()[0]\n blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))\n blksize = min(nao, max(2, blksize))\n\n for k, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n h1ao = hcore_deriv(ia)\n de[k] += numpy.einsum('xij,ij->x', h1ao, dm1)\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2\n\n q1 = 0\n for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):\n q0, q1 = q1, q1 + nf\n dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])\n shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)\n eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',\n shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)\n de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2\n eri1 = None\n de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], dm1[p0:p1]) * 2\n de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2\n\n de += mf_grad.grad_nuc(mol, atmlst)\n return de\n\ndef as_scanner(mcscf_grad):\n '''Generating a nuclear gradients scanner/solver (for geometry optimizer).\n\n The returned solver is a function. This function requires one argument\n \"mol\" as input and returns energy and first order nuclear derivatives.\n\n The solver will automatically use the results of last calculation as the\n initial guess of the new calculation. All parameters assigned in the\n nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are\n automatically applied in the solver.\n\n Note scanner has side effects. It may change many underlying objects\n (_scf, with_df, with_x2c, ...) 
during calculation.\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)\n >>> mc_grad_scanner = mcscf.CASSCF(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))\n '''\n from pyscf import gto\n if isinstance(mcscf_grad, lib.GradScanner):\n return mcscf_grad\n\n logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)\n\n class CASSCF_GradScanner(mcscf_grad.__class__, lib.GradScanner):\n def __init__(self, g):\n lib.GradScanner.__init__(self, g)\n def __call__(self, mol_or_geom, **kwargs):\n if isinstance(mol_or_geom, gto.Mole):\n mol = mol_or_geom\n else:\n mol = self.mol.set_geom_(mol_or_geom, inplace=False)\n\n mc_scanner = self.base\n e_tot = mc_scanner(mol)\n self.mol = mol\n de = self.kernel(**kwargs)\n return e_tot, de\n return CASSCF_GradScanner(mcscf_grad)\n\n\nclass Gradients(lib.StreamObject):\n '''Non-relativistic CASSCF nuclear gradients'''\n def __init__(self, mc):\n self.base = mc\n self.mol = mc.mol\n self.stdout = mc.stdout\n self.verbose = mc.verbose\n self.max_memory = mc.max_memory\n self.atmlst = None\n self.de = None\n self._keys = set(self.__dict__.keys())\n\n def dump_flags(self):\n log = logger.Logger(self.stdout, self.verbose)\n log.info('\\n')\n if not self.base.converged:\n log.warn('Ground state CASSCF not converged')\n log.info('******** %s for %s ********',\n self.__class__, self.base.__class__)\n log.info('max_memory %d MB (current use %d MB)',\n self.max_memory, lib.current_memory()[0])\n return self\n\n def kernel(self, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,\n verbose=None):\n cput0 = (time.clock(), time.time())\n log = logger.new_logger(self, verbose)\n if atmlst is None:\n atmlst = self.atmlst\n else:\n self.atmlst = atmlst\n\n if self.verbose >= logger.WARN:\n self.check_sanity()\n if self.verbose >= logger.INFO:\n self.dump_flags()\n\n self.de = kernel(self.base, mo_coeff, ci, atmlst, mf_grad, log)\n log.timer('CASSCF gradients', *cput0)\n self._finalize()\n return self.de\n\n def _finalize(self):\n if self.verbose >= logger.NOTE:\n logger.note(self, '--------------- %s gradients ---------------',\n self.base.__class__.__name__)\n rhf_grad._write(self, self.mol, self.de, self.atmlst)\n logger.note(self, '----------------------------------------------')\n\n as_scanner = as_scanner\n\nGrad = Gradients\n\nfrom pyscf import mcscf\nmcscf.mc1step.CASSCF.Gradients = lib.class_as_method(Gradients)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import mcscf\n\n mol = gto.Mole()\n mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'\n mol.basis = '631g'\n mol.build()\n mf = scf.RHF(mol).run()\n mc = mcscf.CASSCF(mf, 4, 4).run()\n de = mc.Gradients().kernel()\n print(lib.finger(de) - 0.019602220578635747)\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.atom = 'N 0 0 0; N 0 0 1.2'\n mol.basis = 'sto3g'\n mol.build()\n mf = scf.RHF(mol).run()\n mc = mcscf.CASSCF(mf, 4, 4).run()\n de = kernel(mc)\n\n mcs = mc.as_scanner()\n mol.set_geom_('N 0 0 0; N 0 0 1.201')\n e1 = mcs(mol)\n mol.set_geom_('N 0 0 0; N 0 0 1.199')\n e2 = mcs(mol)\n print(de[1,2], (e1-e2)/0.002*lib.param.BOHR)\n" ]
[ [ "numpy.where", "numpy.sqrt", "numpy.empty" ], [ "numpy.arange", "numpy.dot", "numpy.einsum" ] ]
Unifall/DEKR
[ "3f410bcab420166b030508efd6c71a027c66d5b5" ]
[ "lib/dataset/CrowdPoseDataset.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# The code is based on HigherHRNet-Human-Pose-Estimation.\n# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)\n# Modified by Zigang Geng ([email protected]).\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\nimport os.path\n\nimport cv2\nimport json_tricks as json\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom crowdposetools.cocoeval import COCOeval\nfrom DEKR.lib.utils import zipreader\nfrom DEKR.lib.utils.rescore import CrowdRescoreEval\n\nlogger = logging.getLogger(__name__)\n\n\nclass CrowdPoseDataset(Dataset):\n def __init__(self, cfg, dataset):\n from crowdposetools.coco import COCO\n self.root = cfg.DATASET.ROOT\n self.dataset = dataset\n self.data_format = cfg.DATASET.DATA_FORMAT\n self.coco = COCO(self._get_anno_file_name())\n self.ids = list(self.coco.imgs.keys())\n\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n def _get_anno_file_name(self):\n # example: root/json/crowdpose_{train,val,test}.json\n dataset = 'trainval' if 'rescore' in self.dataset else self.dataset\n return os.path.join(\n self.root,\n 'json',\n 'crowdpose_{}.json'.format(\n dataset\n )\n )\n\n def _get_image_path(self, file_name):\n images_dir = os.path.join(self.root, 'images')\n if self.data_format == 'zip':\n return images_dir + '.zip@' + file_name\n else:\n return os.path.join(images_dir, file_name)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: Tuple (image, target). 
target is the object returned by ``coco.loadAnns``.\n \"\"\"\n coco = self.coco\n img_id = self.ids[index]\n ann_ids = coco.getAnnIds(imgIds=img_id)\n target = coco.loadAnns(ann_ids)\n image_info = coco.loadImgs(img_id)[0]\n\n file_name = image_info['file_name']\n\n if self.data_format == 'zip':\n img = zipreader.imread(\n self._get_image_path(file_name),\n cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n )\n else:\n img = cv2.imread(\n self._get_image_path(file_name),\n cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n )\n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if 'train' in self.dataset:\n return img, [obj for obj in target], image_info\n else: \n return img\n\n def __len__(self):\n return len(self.ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}'.format(self.root)\n return fmt_str\n\n def processKeypoints(self, keypoints):\n tmp = keypoints.copy()\n if keypoints[:, 2].max() > 0:\n num_keypoints = keypoints.shape[0]\n for i in range(num_keypoints):\n tmp[i][0:3] = [\n float(keypoints[i][0]),\n float(keypoints[i][1]),\n float(keypoints[i][2])\n ]\n\n return tmp\n\n def evaluate(self, cfg, preds, scores, output_dir, tag,\n *args, **kwargs):\n '''\n Perform evaluation on COCO keypoint task\n :param cfg: cfg dictionary\n :param preds: prediction\n :param output_dir: output directory\n :param args: \n :param kwargs: \n :return: \n '''\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n os.makedirs(res_folder)\n res_file = os.path.join(\n res_folder, 'keypoints_%s_results.json' % (self.dataset+tag))\n\n # preds is a list of: image x person x (keypoints)\n # keypoints: num_joints * 4 (x, y, score, tag)\n kpts = defaultdict(list)\n for idx, _kpts in enumerate(preds):\n img_id = self.ids[idx]\n file_name = self.coco.loadImgs(img_id)[0]['file_name']\n for idx_kpt, kpt in enumerate(_kpts):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * \\\n (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n kpts[int(file_name.split('.')[0])].append(\n {\n 'keypoints': kpt[:, 0:3],\n 'score': scores[idx][idx_kpt],\n 'image': int(file_name.split('.')[0]),\n 'area': area\n }\n )\n\n # rescoring and oks nms\n oks_nmsed_kpts = []\n # image x person x (keypoints)\n for img in kpts.keys():\n # person x (keypoints)\n img_kpts = kpts[img]\n # person x (keypoints)\n # do not use nms, keep all detections\n keep = []\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file\n )\n\n # CrowdPose `test` set has annotation.\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder\n )\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n data_pack = [\n {\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n }\n for cls_ind, cls in enumerate(self.classes) if not cls == '__background__'\n ]\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n logger.info('=> Writing results json to %s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line 
in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n\n def _coco_keypoint_results_one_category_kernel(self, data_pack):\n cat_id = data_pack['cat_id']\n keypoints = data_pack['keypoints']\n cat_results = []\n num_joints = 14\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array(\n [img_kpts[k]['keypoints'] for k in range(len(img_kpts))]\n )\n key_points = np.zeros(\n (_key_points.shape[0], num_joints * 3),\n dtype=np.float\n )\n\n for ipt in range(num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n # keypoints score.\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2]\n\n for k in range(len(img_kpts)):\n kpt = key_points[k].reshape((num_joints, 3))\n left_top = np.amin(kpt, axis=0)\n right_bottom = np.amax(kpt, axis=0)\n\n w = right_bottom[0] - left_top[0]\n h = right_bottom[1] - left_top[1]\n\n cat_results.append({\n 'image_id': img_kpts[k]['image'],\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': img_kpts[k]['score'],\n 'bbox': list([left_top[0], left_top[1], w, h])\n })\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AR', 'AR .5',\n 'AR .75', 'AP (easy)', 'AP (medium)', 'AP (hard)']\n stats_index = [0, 1, 2, 5, 6, 7, 8, 9, 10]\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[stats_index[ind]]))\n # info_str.append(coco_eval.stats[ind])\n\n return info_str\n\n\nclass CrowdPoseRescoreDataset(CrowdPoseDataset):\n def __init__(self, cfg, dataset):\n CrowdPoseDataset.__init__(self, cfg, dataset)\n\n def evaluate(self, cfg, preds, scores, output_dir,\n *args, **kwargs):\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n os.makedirs(res_folder)\n res_file = os.path.join(\n res_folder, 'keypoints_%s_results.json' % self.dataset)\n\n kpts = defaultdict(list)\n for idx, _kpts in enumerate(preds):\n img_id = self.ids[idx]\n file_name = self.coco.loadImgs(img_id)[0]['file_name']\n for idx_kpt, kpt in enumerate(_kpts):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * \\\n (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n kpts[int(file_name.split('.')[0])].append(\n {\n 'keypoints': kpt[:, 0:3],\n 'score': scores[idx][idx_kpt],\n 'image': int(file_name.split('.')[0]),\n 'area': area\n }\n )\n\n oks_nmsed_kpts = []\n for img in kpts.keys():\n img_kpts = kpts[img]\n keep = []\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file\n )\n\n self._do_python_keypoint_eval(\n cfg.RESCORE.DATA_FILE, res_file, res_folder\n )\n\n def _do_python_keypoint_eval(self, data_file, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = CrowdRescoreEval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.dumpdataset(data_file)" ]
[ [ "numpy.amax", "numpy.min", "numpy.amin", "numpy.max", "numpy.zeros" ] ]
jairideout/onecodex
[ "905d533376b808a0b2ea74c2e9c5ea1e87754a81" ]
[ "onecodex/viz/_distance.py" ]
[ "# -*- coding: utf-8 -*-\nfrom itertools import chain\nimport warnings\n\nfrom onecodex.exceptions import OneCodexException\nfrom onecodex.distance import DistanceMixin\n\n\nclass VizDistanceMixin(DistanceMixin):\n def _compute_distance(self, rank, metric):\n if rank is None:\n raise OneCodexException(\"Please specify a rank or 'auto' to choose automatically\")\n\n # if taxonomy trees are inconsistent, unifrac will not work\n if callable(metric):\n distances = metric(self, rank=rank)\n elif metric in (\"braycurtis\", \"bray-curtis\", \"bray curtis\"):\n distances = self.beta_diversity(metric=\"braycurtis\", rank=rank)\n elif metric in (\"manhattan\", \"cityblock\"):\n distances = self.beta_diversity(metric=\"cityblock\", rank=rank)\n elif metric == \"jaccard\":\n distances = self.beta_diversity(metric=\"jaccard\", rank=rank)\n elif metric in (\"unifrac\", \"weighted_unifrac\"):\n distances = self.unifrac(weighted=True, rank=rank)\n elif metric == \"unweighted_unifrac\":\n distances = self.unifrac(weighted=False, rank=rank)\n else:\n raise OneCodexException(\n \"Metric must be one of: braycurtis, manhattan, jaccard, \"\n \"weighted_unifrac, unweighted_unifrac\"\n )\n\n return distances\n\n def _cluster_by_sample(self, rank=\"auto\", metric=\"braycurtis\", linkage=\"average\"):\n from scipy.cluster import hierarchy\n from scipy.spatial.distance import squareform\n from sklearn.metrics.pairwise import euclidean_distances\n\n if metric == \"euclidean\":\n dist_matrix = euclidean_distances(self._results).round(6)\n else:\n dist_matrix = self._compute_distance(rank=rank, metric=metric).to_data_frame().round(6)\n clustering = hierarchy.linkage(squareform(dist_matrix), method=linkage)\n scipy_tree = hierarchy.dendrogram(clustering, no_plot=True)\n ids_in_order = [self._results.index[int(x)] for x in scipy_tree[\"ivl\"]]\n\n return {\n \"dist_matrix\": dist_matrix,\n \"clustering\": clustering,\n \"scipy_tree\": scipy_tree,\n \"ids_in_order\": ids_in_order,\n }\n\n def _cluster_by_taxa(self, linkage=\"average\"):\n from scipy.cluster import hierarchy\n from scipy.spatial.distance import squareform\n from sklearn.metrics.pairwise import euclidean_distances\n\n dist_matrix = euclidean_distances(self._results.T).round(6)\n clustering = hierarchy.linkage(squareform(dist_matrix), method=linkage)\n scipy_tree = hierarchy.dendrogram(clustering, no_plot=True)\n ids_in_order = [self._results.T.index[int(x)] for x in scipy_tree[\"ivl\"]]\n labels_in_order = [\"{} ({})\".format(self.taxonomy[\"name\"][t], t) for t in ids_in_order]\n\n return {\n \"dist_matrix\": dist_matrix,\n \"clustering\": clustering,\n \"scipy_tree\": scipy_tree,\n \"ids_in_order\": ids_in_order,\n \"labels_in_order\": labels_in_order,\n }\n\n def plot_distance(\n self,\n rank=\"auto\",\n metric=\"braycurtis\",\n title=None,\n xlabel=None,\n ylabel=None,\n tooltip=None,\n return_chart=False,\n linkage=\"average\",\n label=None,\n ):\n \"\"\"Plot beta diversity distance matrix as a heatmap and dendrogram.\n\n Parameters\n ----------\n rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional\n Analysis will be restricted to abundances of taxa at the specified level.\n metric : {'braycurtis', 'manhattan', 'jaccard', 'unifrac', 'unweighted_unifrac}, optional\n Function to use when calculating the distance between two samples.\n linkage : {'average', 'single', 'complete', 'weighted', 'centroid', 'median'}\n The type of linkage to use when clustering axes.\n title : `string`, optional\n Text label at the 
top of the plot.\n xlabel : `string`, optional\n Text label along the horizontal axis.\n ylabel : `string`, optional\n Text label along the vertical axis.\n tooltip : `string` or `list`, optional\n A string or list containing strings representing metadata fields. When a point in the\n plot is hovered over, the value of the metadata associated with that sample will be\n displayed in a modal.\n label : `string` or `callable`, optional\n A metadata field (or function) used to label each analysis. If passing a function, a\n dict containing the metadata for each analysis is passed as the first and only\n positional argument. The callable function must return a string.\n\n Examples\n --------\n Plot the weighted UniFrac distance between all our samples, using counts at the genus level.\n\n >>> plot_distance(rank='genus', metric='unifrac')\n \"\"\"\n import altair as alt\n import numpy as np\n import pandas as pd\n from onecodex.viz import dendrogram\n\n if len(self._results) < 2:\n raise OneCodexException(\n \"`plot_distance` requires 2 or more valid classification results.\"\n )\n\n # this will be passed to the heatmap chart as a dataframe eventually\n plot_data = {\"1) Label\": [], \"2) Label\": [], \"Distance\": [], \"classification_id\": []}\n\n # here we figure out what to put in the tooltips and get the appropriate data\n if tooltip:\n if not isinstance(tooltip, list):\n tooltip = [tooltip]\n else:\n tooltip = []\n\n tooltip.insert(0, \"Label\")\n\n magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)\n formatted_fields = []\n\n for _, magic_field in magic_fields.items():\n field_group = []\n\n for i in (1, 2):\n field = \"{}) {}\".format(i, magic_field)\n plot_data[field] = []\n field_group.append(field)\n\n formatted_fields.append(field_group)\n\n clust = self._cluster_by_sample(rank=rank, metric=metric, linkage=linkage)\n\n # must convert to long format for heatmap plotting\n for idx1, id1 in enumerate(clust[\"dist_matrix\"].index):\n for idx2, id2 in enumerate(clust[\"dist_matrix\"].index):\n if idx1 == idx2:\n plot_data[\"Distance\"].append(np.nan)\n else:\n plot_data[\"Distance\"].append(clust[\"dist_matrix\"].iloc[idx1, idx2])\n\n plot_data[\"classification_id\"].append(id1)\n\n for field_group, magic_field in zip(formatted_fields, magic_fields.values()):\n plot_data[field_group[0]].append(magic_metadata[magic_field][id1])\n plot_data[field_group[1]].append(magic_metadata[magic_field][id2])\n\n plot_data = pd.DataFrame(data=plot_data)\n\n labels_in_order = magic_metadata[\"Label\"][clust[\"ids_in_order\"]].tolist()\n\n # it's important to tell altair to order the cells in the heatmap according to the clustering\n # obtained from scipy\n alt_kwargs = dict(\n x=alt.X(\"1) Label:N\", axis=alt.Axis(title=xlabel), sort=labels_in_order),\n y=alt.Y(\n \"2) Label:N\", axis=alt.Axis(title=ylabel, orient=\"right\"), sort=labels_in_order\n ),\n color=\"Distance:Q\",\n tooltip=list(chain.from_iterable(formatted_fields)) + [\"Distance:Q\"],\n href=\"url:N\",\n url=\"https://app.onecodex.com/classification/\" + alt.datum.classification_id,\n )\n\n chart = (\n alt.Chart(\n plot_data,\n width=15 * len(clust[\"dist_matrix\"].index),\n height=15 * len(clust[\"dist_matrix\"].index),\n )\n .transform_calculate(url=alt_kwargs.pop(\"url\"))\n .mark_rect()\n .encode(**alt_kwargs)\n )\n\n if title:\n chart = chart.properties(title=title)\n\n dendro_chart = dendrogram(clust[\"scipy_tree\"])\n\n if return_chart:\n return dendro_chart | chart\n else:\n (dendro_chart | chart).display()\n\n 
def plot_mds(\n self,\n rank=\"auto\",\n metric=\"braycurtis\",\n method=\"pcoa\",\n title=None,\n xlabel=None,\n ylabel=None,\n color=None,\n size=None,\n tooltip=None,\n return_chart=False,\n label=None,\n ):\n \"\"\"Plot beta diversity distance matrix using multidimensional scaling (MDS).\n\n Parameters\n ----------\n rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional\n Analysis will be restricted to abundances of taxa at the specified level.\n metric : {'braycurtis', 'manhattan', 'jaccard', 'unifrac', 'unweighted_unifrac}, optional\n Function to use when calculating the distance between two samples.\n method : {'pcoa', 'smacof'}\n Algorithm to use for ordination. PCoA uses eigenvalue decomposition and is not well\n suited to non-euclidean distance functions. SMACOF is an iterative optimization strategy\n that can be used as an alternative.\n title : `string`, optional\n Text label at the top of the plot.\n xlabel : `string`, optional\n Text label along the horizontal axis.\n ylabel : `string`, optional\n Text label along the vertical axis.\n size : `string` or `tuple`, optional\n A string or a tuple containing strings representing metadata fields. The size of points\n in the resulting plot will change based on the metadata associated with each sample.\n color : `string` or `tuple`, optional\n A string or a tuple containing strings representing metadata fields. The color of points\n in the resulting plot will change based on the metadata associated with each sample.\n tooltip : `string` or `list`, optional\n A string or list containing strings representing metadata fields. When a point in the\n plot is hovered over, the value of the metadata associated with that sample will be\n displayed in a modal.\n label : `string` or `callable`, optional\n A metadata field (or function) used to label each analysis. If passing a function, a\n dict containing the metadata for each analysis is passed as the first and only\n positional argument. The callable function must return a string.\n\n Examples\n --------\n Scatter plot of weighted UniFrac distance between all our samples, using counts at the genus\n level.\n\n >>> plot_mds(rank='genus', metric='unifrac')\n\n Notes\n -----\n **For `smacof`**: The values reported on the axis labels are Pearson's correlations between\n the distances between points on each axis alone, and the corresponding distances in the\n distance matrix calculated using the user-specified metric. These values are related to the\n effectiveness of the MDS algorithm in placing points on the scatter plot in such a way that\n they truly represent the calculated distances. 
They do not reflect how well the distance\n metric captures similarities between the underlying data (in this case, an OTU table).\n \"\"\"\n import altair as alt\n import numpy as np\n import pandas as pd\n from scipy.spatial.distance import squareform\n from scipy.stats import pearsonr\n from skbio.stats import ordination\n from sklearn import manifold\n from sklearn.metrics.pairwise import euclidean_distances\n\n if len(self._results) < 2:\n raise OneCodexException(\"`plot_mds` requires 2 or more valid classification results.\")\n\n dists = self._compute_distance(rank, metric).to_data_frame()\n\n # here we figure out what to put in the tooltips and get the appropriate data\n if tooltip:\n if not isinstance(tooltip, list):\n tooltip = [tooltip]\n else:\n tooltip = []\n\n tooltip.insert(0, \"Label\")\n\n if color and color not in tooltip:\n tooltip.insert(1, color)\n\n if size and size not in tooltip:\n tooltip.insert(2, size)\n\n magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)\n\n if method == \"smacof\":\n # adapted from https://scikit-learn.org/stable/auto_examples/manifold/plot_mds.html\n x_field = \"MDS1\"\n y_field = \"MDS2\"\n\n seed = np.random.RandomState(seed=3)\n mds = manifold.MDS(\n max_iter=3000, eps=1e-12, random_state=seed, dissimilarity=\"precomputed\", n_jobs=1\n )\n pos = mds.fit(dists).embedding_\n plot_data = pd.DataFrame(pos, columns=[x_field, y_field], index=dists.index)\n plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1) # normalize to [0,1]\n\n # determine how much of the original distance is captured by each of the axes after MDS.\n # this implementation of MDS does not use eigen decomposition and so there's no simple\n # way of returning a 'percent of variance explained' value\n r_squared = []\n\n for axis in [0, 1]:\n mds_dist = pos.copy()\n mds_dist[::, axis] = 0\n mds_dist = squareform(euclidean_distances(mds_dist).round(6))\n r_squared.append(pearsonr(mds_dist, squareform(dists))[0])\n\n # label the axes\n x_extra_label = \"r² = %.02f\" % (r_squared[0],)\n y_extra_label = \"r² = %.02f\" % (r_squared[1],)\n elif method == \"pcoa\":\n # suppress eigenvalue warning from skbio--not because it's an invalid warning, but\n # because lots of folks in the field run pcoa on these distances functions, even if\n # statistically inappropriate. 
perhaps this will change if we ever become more\n # opinionated about the analyses that we allow our users to do (roo)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ord_result = ordination.pcoa(\n dists.round(6)\n ) # round to avoid float precision errors\n\n plot_data = ord_result.samples.iloc[:, [0, 1]] # get first two components\n plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1) # normalize to [0,1]\n plot_data.index = dists.index\n x_field, y_field = plot_data.columns.tolist() # name of first two components\n\n x_extra_label = \"%0.02f%%\" % (ord_result.proportion_explained[0] * 100,)\n y_extra_label = \"%0.02f%%\" % (ord_result.proportion_explained[1] * 100,)\n else:\n raise OneCodexException(\"MDS method must be one of: smacof, pcoa\")\n\n # label the axes\n if xlabel is None:\n xlabel = \"{} ({})\".format(x_field, x_extra_label)\n if ylabel is None:\n ylabel = \"{} ({})\".format(y_field, y_extra_label)\n\n plot_data = pd.concat([plot_data, magic_metadata], axis=1).reset_index()\n\n alt_kwargs = dict(\n x=alt.X(x_field, axis=alt.Axis(title=xlabel)),\n y=alt.Y(y_field, axis=alt.Axis(title=ylabel)),\n tooltip=[magic_fields[t] for t in tooltip],\n href=\"url:N\",\n url=\"https://app.onecodex.com/classification/\" + alt.datum.classification_id,\n )\n\n # only add these parameters if they are in use\n if color:\n alt_kwargs[\"color\"] = magic_fields[color]\n if size:\n alt_kwargs[\"size\"] = magic_fields[size]\n\n chart = (\n alt.Chart(plot_data)\n .transform_calculate(url=alt_kwargs.pop(\"url\"))\n .mark_circle()\n .encode(**alt_kwargs)\n )\n\n if title:\n chart = chart.properties(title=title)\n\n if return_chart:\n return chart\n else:\n chart.interactive().display()\n" ]
[ [ "pandas.concat", "pandas.DataFrame", "sklearn.manifold.MDS", "sklearn.metrics.pairwise.euclidean_distances", "scipy.cluster.hierarchy.dendrogram", "scipy.spatial.distance.squareform", "numpy.random.RandomState" ] ]
vilmar-hillow/kaggle_tgs_salt
[ "8e4db82b08abbf44ed803a0800402ae48dc7ff86" ]
[ "build_submit.py" ]
[ "from pathlib import Path\nimport cv2\nimport numpy as np\nimport pandas as pd\n\n\npred_folder = Path('./predictions/')\n\n\ndef RLenc(img, order='F', format=True):\n \"\"\"\n img is binary mask image, shape (r,c)\n order is down-then-right, i.e. Fortran\n format determines if the order needs to be preformatted (according to submission rules) or not\n\n returns run length as an array or string (if format is True)\n \"\"\"\n bytes = img.reshape(img.shape[0] * img.shape[1], order=order)\n runs = [] # list of run lengths\n r = 0 # the current run length\n pos = 1 # count starts from 1 per WK\n for c in bytes:\n if c == 0:\n if r != 0:\n runs.append((pos, r))\n pos += r\n r = 0\n pos += 1\n else:\n r += 1\n\n # if last run is unsaved (i.e. data ends with 1)\n if r != 0:\n runs.append((pos, r))\n pos += r\n\n if format:\n z = ''\n\n for rr in runs:\n z += '{} {} '.format(rr[0], rr[1])\n return z[:-1]\n else:\n return runs\n\n\ndef read_mask(path):\n mask = cv2.imread(str(path), 0)\n\n return mask / 255\n\n\npred_dict = {}\nfolds = [\"0\", \"1\", \"2\", \"3\", \"4\"]\nfor fold in folds:\n folder = pred_folder / Path(fold)\n files = [f for f in folder.glob('*')]\n for fn in files:\n mask = read_mask(fn)\n if fold == \"0\":\n pred_dict[fn.stem] = mask\n else:\n pred_dict[fn.stem] += mask\n\nfinal_dict = {}\nfor key, value in pred_dict.items():\n average = value / len(folds)\n mask = np.where(average > 0.5, 1, 0)\n final_dict[key] = RLenc(mask)\n\nsub = pd.DataFrame.from_dict(final_dict, orient='index')\nsub.index.names = ['id']\nsub.columns = ['rle_mask']\nsub.to_csv('submission_5_folds_resnext_0.5thr_300ep_256_01finetuned.csv')\n\n" ]
[ [ "numpy.where", "pandas.DataFrame.from_dict" ] ]
PacktPublishing/-Introduction-to-Bayesian-Analysis-in-Python
[ "e0ab762a6cd4423b59d0dbf22e8224028b88c29d" ]
[ "Section 3/3.2.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport scipy.stats as stats\nfrom IPython.core.pylabtools import figsize\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfigsize(12.5, 9)\n\nnorm_pdf = stats.norm.pdf\n\nplt.subplot(311)\nx = np.linspace(0, 60000, 200)\nsp1 = plt.fill_between(x , 0, norm_pdf(x, 35000, 7500), \n color = \"#348ABD\", lw = 3, alpha = 0.6,\n label = \"historical total prices\")\np1 = plt.Rectangle((0, 0), 1, 1, fc=sp1.get_facecolor()[0])\nplt.legend([p1], [sp1.get_label()])\n\nplt.subplot(312)\nx = np.linspace(0, 10000, 200)\nsp2 = plt.fill_between(x , 0, norm_pdf(x, 3000, 500), \n color = \"#A60628\", lw = 3, alpha = 0.6,\n label=\"snowblower price guess\")\n\np2 = plt.Rectangle((0, 0), 1, 1, fc=sp2.get_facecolor()[0])\nplt.legend([p2], [sp2.get_label()])\n\nplt.subplot(313)\nx = np.linspace(0, 25000, 200)\nsp3 = plt.fill_between(x , 0, norm_pdf(x, 12000, 3000), \n color = \"#7A68A6\", lw = 3, alpha = 0.6,\n label = \"Trip price guess\")\nplt.autoscale(tight=True)\np3 = plt.Rectangle((0, 0), 1, 1, fc=sp3.get_facecolor()[0])\nplt.legend([p3], [sp3.get_label()]);\n\n\n# In[8]:\n\n\nimport pymc3 as pm\n\ndata_mu = [3e3, 12e3]\n\ndata_std = [5e2, 3e3] \n\nmu_prior = 35e3\nstd_prior = 75e2\nwith pm.Model() as model:\n true_price = pm.Normal(\"true_price\", mu=mu_prior, sd=std_prior)\n \n prize_1 = pm.Normal(\"first_prize\", mu=data_mu[0], sd=data_std[0])\n prize_2 = pm.Normal(\"second_prize\", mu=data_mu[1], sd=data_std[1])\n price_estimate = prize_1 + prize_2\n \n logp = pm.Normal.dist(mu=price_estimate, sd=(3e3)).logp(true_price)\n error = pm.Potential(\"error\", logp)\n \n\n trace = pm.sample(50000, step=pm.Metropolis())\n burned_trace = trace[10000:]\n\nprice_trace = burned_trace[\"true_price\"]\n\n\n# In[9]:\n\n\nfigsize(12.5, 4)\n\nimport scipy.stats as stats\n\nx = np.linspace(5000, 40000)\nplt.plot(x, stats.norm.pdf(x, 35000, 7500), c = \"k\", lw = 2, \n label = \"prior dist. of suite price\")\n\n_hist = plt.hist(price_trace, bins = 35, normed= True, histtype= \"stepfilled\")\nplt.title(\"Posterior of the true price estimate\")\nplt.vlines(mu_prior, 0, 1.1*np.max(_hist[0]), label = \"prior's mean\",\n linestyles=\"--\")\nplt.vlines(price_trace.mean(), 0, 1.1*np.max(_hist[0]), label = \"posterior's mean\", linestyles=\"-.\")\nplt.legend(loc = \"upper left\");\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.title", "matplotlib.pyplot.autoscale", "scipy.stats.norm.pdf", "numpy.max", "matplotlib.pyplot.subplot", "matplotlib.pyplot.hist" ] ]
ajaybhat/DLND
[ "014e4973835817c6e727ff164e5253371f28fe07" ]
[ "Project 5/helper.py" ]
[ "import math\nimport os\nimport hashlib\nfrom urllib.request import urlretrieve\nimport zipfile\nimport gzip\nimport shutil\n\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\n\ndef _read32(bytestream):\n \"\"\"\n Read 32-bit integer from bytesteam\n :param bytestream: A bytestream\n :return: 32-bit integer\n \"\"\"\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef _unzip(save_path, _, database_name, data_path):\n \"\"\"\n Unzip wrapper with the same interface as _ungzip\n :param save_path: The path of the gzip files\n :param database_name: Name of database\n :param data_path: Path to extract to\n :param _: HACK - Used to have to same interface as _ungzip\n \"\"\"\n print('Extracting {}...'.format(database_name))\n with zipfile.ZipFile(save_path) as zf:\n zf.extractall(data_path)\n\n\ndef _ungzip(save_path, extract_path, database_name, _):\n \"\"\"\n Unzip a gzip file and extract it to extract_path\n :param save_path: The path of the gzip files\n :param extract_path: The location to extract the data to\n :param database_name: Name of database\n :param _: HACK - Used to have to same interface as _unzip\n \"\"\"\n # Get data from save_path\n with open(save_path, 'rb') as f:\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number {} in file: {}'.format(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols)\n\n # Save data to extract_path\n for image_i, image in enumerate(\n tqdm(data, unit='File', unit_scale=True, miniters=1, desc='Extracting {}'.format(database_name))):\n Image.fromarray(image, 'L').save(os.path.join(extract_path, 'image_{}.jpg'.format(image_i)))\n\n\ndef get_image(image_path, width, height, mode):\n \"\"\"\n Read image from image_path\n :param image_path: Path of image\n :param width: Width of image\n :param height: Height of image\n :param mode: Mode of image\n :return: Image data\n \"\"\"\n image = Image.open(image_path)\n\n if image.size != (width, height): # HACK - Check if image is from the CELEBA dataset\n # Remove most pixels that aren't part of a face\n face_width = face_height = 108\n j = (image.size[0] - face_width) // 2\n i = (image.size[1] - face_height) // 2\n image = image.crop([j, i, j + face_width, i + face_height])\n image = image.resize([width, height], Image.BILINEAR)\n\n return np.array(image.convert(mode))\n\n\ndef get_batch(image_files, width, height, mode):\n data_batch = np.array(\n [get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\n # Make sure the images are in 4 dimensions\n if len(data_batch.shape) < 4:\n data_batch = data_batch.reshape(data_batch.shape + (1,))\n\n return data_batch\n\n\ndef images_square_grid(images, mode):\n \"\"\"\n Save images as a square grid\n :param images: Images to be used for the grid\n :param mode: The mode to use for images\n :return: Image of images in a square grid\n \"\"\"\n # Get maximum size for square grid of images\n save_size = math.floor(np.sqrt(images.shape[0]))\n\n # Scale to 0-255\n images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)\n\n # Put images in a square arrangement\n images_in_square = np.reshape(\n images[:save_size*save_size],\n (save_size, save_size, 
images.shape[1], images.shape[2], images.shape[3]))\n if mode == 'L':\n images_in_square = np.squeeze(images_in_square, 4)\n\n # Combine images to grid image\n new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n for col_i, col_images in enumerate(images_in_square):\n for image_i, image in enumerate(col_images):\n im = Image.fromarray(image, mode)\n new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n\n return new_im\n\n\ndef download_extract(database_name, data_path):\n \"\"\"\n Download and extract database\n :param database_name: Database name\n \"\"\"\n DATASET_CELEBA_NAME = 'celeba'\n DATASET_MNIST_NAME = 'mnist'\n\n if database_name == DATASET_CELEBA_NAME:\n url = 'https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip'\n hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'\n extract_path = os.path.join(data_path, 'img_align_celeba')\n save_path = os.path.join(data_path, 'celeba.zip')\n extract_fn = _unzip\n elif database_name == DATASET_MNIST_NAME:\n url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'\n hash_code = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'\n extract_path = os.path.join(data_path, 'mnist')\n save_path = os.path.join(data_path, 'train-images-idx3-ubyte.gz')\n extract_fn = _ungzip\n\n if os.path.exists(extract_path):\n print('Found {} Data'.format(database_name))\n return\n\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n if not os.path.exists(save_path):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:\n urlretrieve(\n url,\n save_path,\n pbar.hook)\n\n assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \\\n '{} file is corrupted. Remove the file and try again.'.format(save_path)\n\n os.makedirs(extract_path)\n try:\n extract_fn(save_path, extract_path, database_name, data_path)\n except Exception as err:\n shutil.rmtree(extract_path) # Remove extraction folder if there is an error\n raise err\n\n # Remove compressed data\n os.remove(save_path)\n\n\nclass Dataset(object):\n \"\"\"\n Dataset\n \"\"\"\n def __init__(self, dataset_name, data_files):\n \"\"\"\n Initalize the class\n :param dataset_name: Database name\n :param data_files: List of files in the database\n \"\"\"\n DATASET_CELEBA_NAME = 'celeba'\n DATASET_MNIST_NAME = 'mnist'\n IMAGE_WIDTH = 28\n IMAGE_HEIGHT = 28\n\n if dataset_name == DATASET_CELEBA_NAME:\n self.image_mode = 'RGB'\n image_channels = 3\n\n elif dataset_name == DATASET_MNIST_NAME:\n self.image_mode = 'L'\n image_channels = 1\n\n self.data_files = data_files\n self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n\n def get_batches(self, batch_size):\n \"\"\"\n Generate batches\n :param batch_size: Batch Size\n :return: Batches of data\n \"\"\"\n IMAGE_MAX_VALUE = 255\n\n current_index = 0\n while current_index + batch_size <= self.shape[0]:\n data_batch = get_batch(\n self.data_files[current_index:current_index + batch_size],\n *self.shape[1:3],\n self.image_mode)\n\n current_index += batch_size\n\n yield data_batch / IMAGE_MAX_VALUE - 0.5\n\n\nclass DLProgress(tqdm):\n \"\"\"\n Handle Progress Bar while Downloading\n \"\"\"\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n \"\"\"\n A hook function that will be called once on establishment of the network connection and\n once after each block read thereafter.\n :param block_num: A count of blocks transferred so far\n :param block_size: Block size in bytes\n :param 
total_size: The total size of the file. This may be -1 on older FTP servers which do not return\n a file size in response to a retrieval request.\n \"\"\"\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n " ]
[ [ "numpy.sqrt", "numpy.reshape", "numpy.squeeze", "numpy.dtype", "numpy.frombuffer" ] ]
Tripartito/AugmentedReality
[ "b0a4455bc15ab5a5647b221bd1efa715fe9114cc" ]
[ "CV/5_LinearFilters.py" ]
[ "# Modules normally used\nimport numpy as np\nimport cv2\n\ndef CreateFrame(img, krad, color): # Frame (trick to avoid out-of-bounds access)\n height, width, depth = img.shape\n\n if color == \"white\":\n frm = np.ones((height + krad * 2, width + krad * 2, depth))\n else:\n frm = np.zeros((height + krad * 2, width + krad * 2, depth))\n\n frm[krad:-krad, krad:-krad] = img\n\n return frm\n\ndef FilterImage(img, framed, ksize, krn): # Apply filter to image\n shape = img.shape\n height = shape[0]\n width = shape[1]\n\n fil = np.zeros(img.shape)\n\n # Method 1 (Optimal)\n for i in range(0, height):\n for j in range(0, width):\n fil[i, j] = (framed[i:i+ksize, j:j+ksize] * krn[:, :, np.newaxis]).sum(axis=(0, 1))\n\n # Method 2 (Visual)\n #for i in range (0, height):\n # for j in range (0, width):\n # sub = framed[i:i+ksize, j:j+ksize]\n # b = (sub[:, :, 0] * krn).sum()\n # g = (sub[:, :, 1] * krn).sum()\n # r = (sub[:, :, 2] * krn).sum()\n # fil[i, j] = (b, g, r)\n\n return fil\n\n# Normalized Box Filter\ndef BoxFilter(img, ksize): #ksize = kernel size (\"diameter\")\n\n # Kernel definition\n krad = int(ksize / 2) # kernel radius\n krn = np.ones((ksize, ksize)) # create grid\n krn /= krn.sum() # normalize kernel\n\n # Frame (trick to avoid out-of-bounds access)\n framed = CreateFrame(img, krad, \"black\")\n\n return FilterImage(img, framed, ksize, krn)\n\ndef GaussianKernel(ksize):\n\n krad = int(ksize / 2)\n \n # Method 1 (Optimal)\n x, y = np.meshgrid(np.linspace(-krad, krad, ksize), np.linspace(-krad, krad, ksize))\n d = np.sqrt(x * x + y * y)\n mu, sigma = 0.0, krad / 3 # sigma = 0.4\n krn = np.exp(-((mu - d)**2 / (2 * sigma**2)))\n krn /= krn.sum()\n \n # Method 2 (Visual)\n #krn = np.zeros((ksize, ksize))\n #sigma = krad / 3\n\n #for i in range (0, ksize):\n # for j in range (0, ksize):\n # d = np.sqrt((krad - i)**2 + (krad - j)**2)\n # krn[i, j] = np.exp(-(d**2 / (2.0 * sigma**2)))\n\n # Normalize kernel\n krn /= krn.sum()\n\n return krn\n\n# Normalized Gaussian Filter\ndef GaussianFilter(img, ksize):\n\n #Gaussian Kernel\n krad = int(ksize / 2)\n krn = GaussianKernel(ksize)\n\n # Frame (trick to avoid out-of-bounds access)\n framed = CreateFrame(img, krad, \"white\")\n \n #Filtered image (output)\n return FilterImage(img, framed, ksize, krn)\n\ndef main():\n img = cv2.imread(\"sonic.jpg\", cv2.IMREAD_COLOR)\n img = img / 255.0\n #filtered = BoxFilter(img, 5)\n filtered = GaussianFilter(img, 31)\n cv2.imshow(\"Original\", img)\n cv2.imshow(\"Filtered\", filtered)\n cv2.waitKey(0)\n\nmain()" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.ones", "numpy.exp", "numpy.zeros" ] ]
YhHoo/Python-ANN
[ "6d3629f54100c9dee721108352f562cd28a2e5fc" ]
[ "activity_2.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom activity_1 import Neural_Object\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\ntemp = [] # to store the weights\n\n# ------------------------------------------\n# Load the saved optimized weights from .txt\n# ------------------------------------------\n\nuserInput = input(\"Do you want to recall the saved weights?(y/n): \")\n\nif userInput is \"y\" or userInput is \"Y\":\n with open(\"weights.txt\", \"r\") as f:\n for w in f:\n temp.append(float(w.strip()))\n OptimizedWeight_1 = temp\n temp = np.asarray(temp).reshape((2, 3))\n print(\"Weights 1 =\\n\", temp)\n temp = [] # Bring the array bac to list\n\n with open(\"weights2.txt\", \"r\") as f:\n for w in f:\n temp.append(float(w.strip()))\n OptimizedWeight_2 = temp\n temp = np.asarray(temp).reshape((3, 1))\n print(\"Weights 2 =\\n\", temp)\n\n\n# ------------------------------------------\n# Load the saved optimized weights from .txt\n# ------------------------------------------\n\nNN = Neural_Object()\nNN.setParams(OptimizedWeight_1 + OptimizedWeight_2)\nprint(\"\\nWeight 1:\\n\", NN.W1)\nprint(\"\\nWeight 2:\\n\", NN.W2)\n\n# create all possible data set to test the model\nhourStudy = np.linspace(0, 10, 100)\nhourSleep = np.linspace(0, 5, 100)\n# Normalize\nhourSleepNorm = hourSleep / 10\nhourStudyNorm = hourStudy / 5\n\n# create 2D data sets(x, y) of all combinations of hSleep and hStudy\na, b = np.meshgrid(hourSleepNorm, hourStudyNorm)\ndataSet = np.zeros((a.size, 2))\n\n# set all column 0 to the hourStudyNorm[10000]\ndataSet[:, 0] = a.ravel()\ndataSet[:, 1] = b.ravel()\n\nallOutputs = NN.forward(dataSet)\n\n\n# -------------------------------\n# Display the result in 3D graphs\n# -------------------------------\n\nxx = np.dot(hourStudy.reshape(100, 1), np.ones((1, 100)))\nyy = np.dot(hourSleep.reshape(100, 1), np.ones((1, 100))).T\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nsurf = ax.plot_surface(xx, yy, 100*allOutputs.reshape(100, 100),\n cmap=cm.jet)\n\nax.set_xlabel('Hours Sleep')\nax.set_ylabel('Hours Study')\nax.set_zlabel('Test Score')\n\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.asarray", "numpy.ones", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
pipete40/warehousing-tools
[ "58cd057af8d0ccd3af32001b8b0dfdb0ab6b9620" ]
[ "slotting/plots.py" ]
[ "\nimport numpy as np\nfrom . import aux_funcs as af\nfrom bokeh.resources import CDN\nfrom bokeh.embed import components\nfrom bokeh.palettes import Spectral11\nfrom bokeh.plotting import figure\nfrom bokeh.models import Label\nfrom bokeh.models.tickers import FixedTicker\n\ndef graph_groups_inventory(x, N, hs, invs):\n\n invs_T = af.group_inventory_signals(x, hs, invs)\n Ns = np.cumsum(N[::-1])\n n, dates = invs_T.shape\n #format fig\n p = figure(plot_width=700, plot_height=400, y_range=[0, int(np.max(invs_T)*1.1)],\n x_axis_label='Time', y_axis_label='Number of Pallets',\n )\n p.xaxis.axis_label_text_font_size = \"12pt\"\n p.yaxis.axis_label_text_font_size = \"12pt\"\n p.xaxis.major_label_text_font_size = \"11pt\"\n p.yaxis.major_label_text_font_size = \"11pt\"\n p.xaxis.axis_label_text_font = \"helvetica\"\n p.yaxis.axis_label_text_font = \"helvetica\"\n p.xaxis.axis_label_text_font_style = \"normal\"\n p.yaxis.axis_label_text_font_style = \"normal\"\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n my_palette = Spectral11[0:n]\n\n xs_single = np.arange(dates)\n xs = [xs_single] * n\n ys = invs_T.tolist()\n\n #plot timeseries\n p.multi_line(xs, ys, line_color=my_palette, line_width=2)\n\n #plot slot quantities\n p.multi_line([[0, dates]]*n, [[k] * n for k in Ns], line_color=['red']*n, line_width=1, line_dash=\"dashed\")\n\n #include the annotations\n x_end = int(dates*0.95)\n SUB = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\n for i in range(n):\n t = \"x\" + str(n-i)\n text = Label(x=x_end, y=Ns[i], text=t.translate(SUB), render_mode='css',\n background_fill_color='white', background_fill_alpha=0, text_font_style='italic',\n text_font='Times', text_font_size='13pt')\n p.add_layout(text)\n\n script, div = components(p, CDN)\n return script, div\n\n\ndef graph_fvals(fvals):\n fvals = np.asarray(fvals)\n percentages = np.round(np.divide(-np.diff(fvals), fvals[1:]) * 100, 1).tolist()\n\n p = figure(plot_width=600, plot_height=400, y_range=[0,int(np.max(fvals)*1.2)], x_range=[0, fvals.shape[0]+1],\n x_axis_label='Number of slot types', y_axis_label='Sum of height of all slots',\n )\n p.xaxis.axis_label_text_font_size = \"12pt\"\n p.yaxis.axis_label_text_font_size = \"12pt\"\n p.xaxis.major_label_text_font_size = \"11pt\"\n p.yaxis.major_label_text_font_size = \"11pt\"\n p.xaxis.axis_label_text_font = \"helvetica\"\n p.yaxis.axis_label_text_font = \"helvetica\"\n p.xaxis.major_label_text_font = \"helvetica\"\n p.yaxis.major_label_text_font = \"helvetica\"\n p.xaxis.axis_label_text_font_style = \"normal\"\n p.yaxis.axis_label_text_font_style = \"normal\"\n p.ygrid.grid_line_dash = [6, 4]\n p.xgrid.grid_line_dash = [6, 4]\n\n xs = np.arange(1,fvals.shape[0]+1)\n p.circle(xs, fvals)\n p.line(xs, fvals, line_dash=\"4 4\", line_width=1, line_alpha=0.4)\n p.xaxis.ticker = FixedTicker(ticks=xs)\n\n for i, perc in enumerate(percentages):\n t = str(perc)+'%'\n text = Label(x=xs[i+1], y=fvals[i+1], text=t, render_mode='css',\n background_fill_color='white', background_fill_alpha=0, text_font='Times', text_font_size='13pt')\n p.add_layout(text)\n\n script, div = components(p, CDN)\n return script, div" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.cumsum", "numpy.max", "numpy.diff" ] ]
arulvelkumar/jira-agile-metrics
[ "55c4a25cbaf487cf0a5faf57879c0a34473b59c9" ]
[ "jira_agile_metrics/calculators/defects_test.py" ]
[ "import pytest\nfrom pandas import Timestamp, NaT\n\nfrom ..conftest import (\n FauxJIRA as JIRA,\n FauxIssue as Issue,\n FauxFieldValue as Value,\n)\n\nfrom ..utils import extend_dict\n\nfrom ..querymanager import QueryManager\nfrom .defects import DefectsCalculator\n\n\[email protected]\ndef fields(minimal_fields):\n return minimal_fields + [\n {\"id\": \"priority\", \"name\": \"Priority\"},\n {\"id\": \"customfield_001\", \"name\": \"Environment\"},\n {\"id\": \"customfield_002\", \"name\": \"Defect type\"},\n ]\n\n\[email protected]\ndef settings(minimal_settings):\n return extend_dict(\n minimal_settings,\n {\n \"defects_query\": \"issueType = Defect\",\n \"defects_window\": 3,\n \"defects_priority_field\": \"Priority\",\n \"defects_priority_values\": [\"Low\", \"Medium\", \"High\"],\n \"defects_type_field\": \"Defect type\",\n \"defects_type_values\": [\"Config\", \"Data\", \"Code\"],\n \"defects_environment_field\": \"Environment\",\n \"defects_environment_values\": [\"SIT\", \"UAT\", \"PROD\"],\n \"defects_by_priority_chart\": \"defects-by-priority.png\",\n \"defects_by_priority_chart_title\": \"Defects by priority\",\n \"defects_by_type_chart\": \"defects-by-type.png\",\n \"defects_by_type_chart_title\": \"Defects by type\",\n \"defects_by_environment_chart\": \"defects-by-environment.png\",\n \"defects_by_environment_chart_title\": \"Defects by environment\",\n },\n )\n\n\[email protected]\ndef jira(fields):\n return JIRA(\n fields=fields,\n issues=[\n Issue(\n \"D-1\",\n summary=\"Debt 1\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-01-01 01:01:01\",\n resolution=\"Done\",\n resolutiondate=\"2018-03-20 02:02:02\",\n priority=Value(\"High\", \"High\"),\n customfield_001=Value(None, \"PROD\"),\n customfield_002=Value(None, \"Config\"),\n changes=[],\n ),\n Issue(\n \"D-2\",\n summary=\"Debt 2\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-01-02 01:01:01\",\n resolution=\"Done\",\n resolutiondate=\"2018-01-20 02:02:02\",\n priority=Value(\"Medium\", \"Medium\"),\n customfield_001=Value(None, \"SIT\"),\n customfield_002=Value(None, \"Config\"),\n changes=[],\n ),\n Issue(\n \"D-3\",\n summary=\"Debt 3\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-02-03 01:01:01\",\n resolution=\"Done\",\n resolutiondate=\"2018-03-20 02:02:02\",\n priority=Value(\"High\", \"High\"),\n customfield_001=Value(None, \"UAT\"),\n customfield_002=Value(None, \"Config\"),\n changes=[],\n ),\n Issue(\n \"D-4\",\n summary=\"Debt 4\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-01-04 01:01:01\",\n resolution=None,\n resolutiondate=None,\n priority=Value(\"Medium\", \"Medium\"),\n customfield_001=Value(None, \"PROD\"),\n customfield_002=Value(None, \"Data\"),\n changes=[],\n ),\n Issue(\n \"D-5\",\n summary=\"Debt 5\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-02-05 01:01:01\",\n resolution=\"Done\",\n resolutiondate=\"2018-02-20 02:02:02\",\n priority=Value(\"High\", \"High\"),\n customfield_001=Value(None, \"SIT\"),\n customfield_002=Value(None, \"Data\"),\n changes=[],\n ),\n Issue(\n \"D-6\",\n summary=\"Debt 6\",\n issuetype=Value(\"Bug\", \"Bug\"),\n status=Value(\"Closed\", \"closed\"),\n created=\"2018-03-06 01:01:01\",\n resolution=None,\n resolutiondate=None,\n priority=Value(\"Medium\", \"Medium\"),\n customfield_001=Value(None, \"UAT\"),\n 
customfield_002=Value(None, \"Data\"),\n changes=[],\n ),\n ],\n )\n\n\ndef test_no_query(jira, settings):\n query_manager = QueryManager(jira, settings)\n results = {}\n settings = extend_dict(settings, {\"defects_query\": None})\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n assert data is None\n\n\ndef test_columns(jira, settings):\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert list(data.columns) == [\n \"key\",\n \"priority\",\n \"type\",\n \"environment\",\n \"created\",\n \"resolved\",\n ]\n\n\ndef test_empty(fields, settings):\n jira = JIRA(fields=fields, issues=[])\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert len(data.index) == 0\n\n\ndef test_breakdown(jira, settings):\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert data.to_dict(\"records\") == [\n {\n \"key\": \"D-1\",\n \"created\": Timestamp(\"2018-01-01 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"PROD\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-2\",\n \"created\": Timestamp(\"2018-01-02 01:01:01\"),\n \"resolved\": Timestamp(\"2018-01-20 02:02:02\"),\n \"priority\": \"Medium\",\n \"environment\": \"SIT\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-3\",\n \"created\": Timestamp(\"2018-02-03 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"UAT\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-4\",\n \"created\": Timestamp(\"2018-01-04 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": \"PROD\",\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-5\",\n \"created\": Timestamp(\"2018-02-05 01:01:01\"),\n \"resolved\": Timestamp(\"2018-02-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"SIT\",\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-6\",\n \"created\": Timestamp(\"2018-03-06 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": \"UAT\",\n \"type\": \"Data\",\n },\n ]\n\n\ndef test_no_priority_field(jira, settings):\n settings = extend_dict(settings, {\"defects_priority_field\": None})\n\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert data.to_dict(\"records\") == [\n {\n \"key\": \"D-1\",\n \"created\": Timestamp(\"2018-01-01 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": None,\n \"environment\": \"PROD\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-2\",\n \"created\": Timestamp(\"2018-01-02 01:01:01\"),\n \"resolved\": Timestamp(\"2018-01-20 02:02:02\"),\n \"priority\": None,\n \"environment\": \"SIT\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-3\",\n \"created\": Timestamp(\"2018-02-03 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": None,\n \"environment\": \"UAT\",\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-4\",\n \"created\": Timestamp(\"2018-01-04 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": None,\n \"environment\": \"PROD\",\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-5\",\n \"created\": 
Timestamp(\"2018-02-05 01:01:01\"),\n \"resolved\": Timestamp(\"2018-02-20 02:02:02\"),\n \"priority\": None,\n \"environment\": \"SIT\",\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-6\",\n \"created\": Timestamp(\"2018-03-06 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": None,\n \"environment\": \"UAT\",\n \"type\": \"Data\",\n },\n ]\n\n\ndef test_no_type_field(jira, settings):\n settings = extend_dict(settings, {\"defects_type_field\": None})\n\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert data.to_dict(\"records\") == [\n {\n \"key\": \"D-1\",\n \"created\": Timestamp(\"2018-01-01 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"PROD\",\n \"type\": None,\n },\n {\n \"key\": \"D-2\",\n \"created\": Timestamp(\"2018-01-02 01:01:01\"),\n \"resolved\": Timestamp(\"2018-01-20 02:02:02\"),\n \"priority\": \"Medium\",\n \"environment\": \"SIT\",\n \"type\": None,\n },\n {\n \"key\": \"D-3\",\n \"created\": Timestamp(\"2018-02-03 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"UAT\",\n \"type\": None,\n },\n {\n \"key\": \"D-4\",\n \"created\": Timestamp(\"2018-01-04 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": \"PROD\",\n \"type\": None,\n },\n {\n \"key\": \"D-5\",\n \"created\": Timestamp(\"2018-02-05 01:01:01\"),\n \"resolved\": Timestamp(\"2018-02-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": \"SIT\",\n \"type\": None,\n },\n {\n \"key\": \"D-6\",\n \"created\": Timestamp(\"2018-03-06 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": \"UAT\",\n \"type\": None,\n },\n ]\n\n\ndef test_no_environment_field(jira, settings):\n settings = extend_dict(settings, {\"defects_environment_field\": None})\n\n query_manager = QueryManager(jira, settings)\n results = {}\n calculator = DefectsCalculator(query_manager, settings, results)\n\n data = calculator.run()\n\n assert data.to_dict(\"records\") == [\n {\n \"key\": \"D-1\",\n \"created\": Timestamp(\"2018-01-01 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": None,\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-2\",\n \"created\": Timestamp(\"2018-01-02 01:01:01\"),\n \"resolved\": Timestamp(\"2018-01-20 02:02:02\"),\n \"priority\": \"Medium\",\n \"environment\": None,\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-3\",\n \"created\": Timestamp(\"2018-02-03 01:01:01\"),\n \"resolved\": Timestamp(\"2018-03-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": None,\n \"type\": \"Config\",\n },\n {\n \"key\": \"D-4\",\n \"created\": Timestamp(\"2018-01-04 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": None,\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-5\",\n \"created\": Timestamp(\"2018-02-05 01:01:01\"),\n \"resolved\": Timestamp(\"2018-02-20 02:02:02\"),\n \"priority\": \"High\",\n \"environment\": None,\n \"type\": \"Data\",\n },\n {\n \"key\": \"D-6\",\n \"created\": Timestamp(\"2018-03-06 01:01:01\"),\n \"resolved\": NaT,\n \"priority\": \"Medium\",\n \"environment\": None,\n \"type\": \"Data\",\n },\n ]\n" ]
[ [ "pandas.Timestamp" ] ]