repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---
zjjliujs/caffe
|
[
"9d17cb456274f8e579dfcb0b1e0097d604ae37c6"
] |
[
"python/caffe_double/test/test_solver.py"
] |
[
"import unittest\nimport tempfile\nimport os\nimport numpy as np\nimport six\n\nimport caffe_double\nfrom test_net import simple_net_file\n\n\nclass TestSolver(unittest.TestCase):\n def setUp(self):\n self.num_output = 13\n net_f = simple_net_file(self.num_output)\n f = tempfile.NamedTemporaryFile(mode='w+', delete=False)\n f.write(\"\"\"net: '\"\"\" + net_f + \"\"\"'\n test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75\n display: 100 max_iter: 100 snapshot_after_train: false\n snapshot_prefix: \"model\" \"\"\")\n f.close()\n self.solver = caffe_double.SGDSolver(f.name)\n # also make sure get_solver runs\n caffe_double.get_solver(f.name)\n caffe_double.set_mode_cpu()\n # fill in valid labels\n self.solver.net.blobs['label'].data[...] = \\\n np.random.randint(self.num_output,\n size=self.solver.net.blobs['label'].data.shape)\n self.solver.test_nets[0].blobs['label'].data[...] = \\\n np.random.randint(self.num_output,\n size=self.solver.test_nets[0].blobs['label'].data.shape)\n os.remove(f.name)\n os.remove(net_f)\n\n def test_solve(self):\n self.assertEqual(self.solver.iter, 0)\n self.solver.solve()\n self.assertEqual(self.solver.iter, 100)\n\n def test_apply_update(self):\n net = self.solver.net\n data = net.layers[1].blobs[0].data[...]\n # Reset the weights of that layer to 0\n data[...] = 0\n net.layers[1].blobs[0].diff[...] = 1\n # Apply the update, the initial learning rate should be 0.01\n self.solver.apply_update()\n # Check that the new weights are -0.01, with a precision of 1e-7\n self.assertTrue((data - -0.01 * np.ones(data.shape)).max() < 1e-7)\n\n def test_net_memory(self):\n \"\"\"Check that nets survive after the solver is destroyed.\"\"\"\n\n nets = [self.solver.net] + list(self.solver.test_nets)\n self.assertEqual(len(nets), 2)\n del self.solver\n\n total = 0\n for net in nets:\n for ps in six.itervalues(net.params):\n for p in ps:\n total += p.data.sum() + p.diff.sum()\n for bl in six.itervalues(net.blobs):\n total += bl.data.sum() + bl.diff.sum()\n\n def test_snapshot(self):\n self.solver.snapshot()\n # Check that these files exist and then remove them\n files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']\n for fn in files:\n assert os.path.isfile(fn)\n os.remove(fn)\n"
] |
[
[
"numpy.ones",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stepinski/machinelearning
|
[
"1f84883a25616da4cd76bb4655267efd3421e561"
] |
[
"mit-ml/mnist/part2-twodigit/mlp.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom train_utils import batchify_data, run_epoch, train_model, Flatten\nimport utils_multiMNIST as U\npath_to_data_dir = '../Datasets/'\nuse_mini_dataset = True\n\nbatch_size = 64\nnb_classes = 10\nnb_epoch = 30\nnum_classes = 10\nimg_rows, img_cols = 42, 28 # input image dimensions\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dimension):\n super(MLP, self).__init__()\n print(input_dimension)\n self.flatten = Flatten()\n self.linear = nn.Linear(input_dimension,64)\n self.output1 = lambda x : nn.Linear(64,x) # for top digit\n self.output2 = nn.Linear(64,64) \n \n\n def forward(self, x):\n xf = self.flatten(x)\n # use model layers to predict the two digits\n input= self.linear(xf)\n out1=self.output1(input,out_features=x.shape[0])\n out2=self.output2(input,out_features=x.shape[0])\n \n # print(\"test\")\n # print(xl[:, :1])\n # print(xl[:, 1:2])\n return out1,out2\n\ndef main():\n X_train, y_train, X_test, y_test = U.get_data(path_to_data_dir, use_mini_dataset)\n\n # Split into train and dev\n dev_split_index = int(9 * len(X_train) / 10)\n X_dev = X_train[dev_split_index:]\n y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]\n X_train = X_train[:dev_split_index]\n y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]\n\n permutation = np.array([i for i in range(len(X_train))])\n np.random.shuffle(permutation)\n X_train = [X_train[i] for i in permutation]\n y_train = [[y_train[0][i] for i in permutation], [y_train[1][i] for i in permutation]]\n\n # Split dataset into batches\n train_batches = batchify_data(X_train, y_train, batch_size)\n dev_batches = batchify_data(X_dev, y_dev, batch_size)\n test_batches = batchify_data(X_test, y_test, batch_size)\n\n # Load model\n input_dimension = img_rows * img_cols\n model = MLP(input_dimension) # TODO add proper layers to MLP class above\n\n # Train\n train_model(train_batches, dev_batches, model)\n\n ## Evaluate the model on test data\n loss, acc = run_epoch(test_batches, model.eval(), None)\n print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))\n\nif __name__ == '__main__':\n # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx\n np.random.seed(12321) # for reproducibility\n torch.manual_seed(12321) # for reproducibility\n main()\n"
] |
[
[
"torch.nn.Linear",
"torch.manual_seed",
"numpy.random.seed",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Meteodan/arpsEnKFtools
|
[
"848c4c0eb8921d17690bf35a24f6d0714c4bc37f",
"848c4c0eb8921d17690bf35a24f6d0714c4bc37f"
] |
[
"template/1km243x243_033116_newse/master_config.py",
"template/3km153x153_051913_SSEF_idealized_radarda/master_config.py"
] |
[
"\"\"\"\nmaster_config.py -- Contains parameters to configure an end-to-end ARPS-EnKF run\n\"\"\"\nimport os\nfrom datetime import datetime\nimport numpy as np\n\n# Define needed directories and experiment names/tags\n# Base project names and directories\nscratch_base_dir = '/scratch/rice/d/dawson29'\ndepot_base_dir = '/depot/dawson29'\narpsenkftools_base_dir = os.path.join(depot_base_dir, 'apps/Projects/arpsEnKFtools')\nproject_dir = 'Projects/VORTEXSE/simulations/ARPS'\nproject_scr_dir = os.path.join(scratch_base_dir, project_dir)\nproject_depot_dir = os.path.join(depot_base_dir, 'data', project_dir)\nIOP_name = '2016_IOP3'\nIOP_scr_dir = os.path.join(project_scr_dir, IOP_name, 'EnKF')\nIOP_depot_dir = os.path.join(project_depot_dir, IOP_name, 'EnKF')\next_model_data_dir = os.path.join(depot_base_dir,\n 'data/Projects/VORTEXSE/model_data/newse_data')\nsfc_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/sao', IOP_name)\nradar_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/NEXRAD/IOP_3/level2/')\nua_obs_dir = os.path.join(depot_base_dir, 'data/Projects/VORTEXSE/obsdata/2016/raob', IOP_name)\n# TODO: add other obs type directories here\n\n# Experiment name and directories\nexp_name_base = '1km243x243_033116'\nexp_name_tag = '_newse'\nexp_name = exp_name_base + exp_name_tag\nexp_scr_dir = os.path.join(IOP_scr_dir, exp_name)\nprep_work_dir = os.path.join(exp_scr_dir, '{}_prep_work'.format(exp_name))\nexp_depot_dir = os.path.join(IOP_depot_dir, exp_name)\ntemplate_base_dir = os.path.join(arpsenkftools_base_dir, 'template')\ntemplate_exp_dir = os.path.join(template_base_dir, exp_name)\nexternal_icbc_dir = os.path.join(IOP_depot_dir, exp_name+'_icbc')\nsfcdata_dir = os.path.join(project_depot_dir, 'sfcdata')\nsfcdata_file = '{}.sfcdata'.format(exp_name)\nsfcdata_path = os.path.join(sfcdata_dir, sfcdata_file)\ntrndata_dir = os.path.join(project_depot_dir, 'trndata')\ntrndata_file = '{}.trndata'.format(exp_name)\ntrndata_path = os.path.join(trndata_dir, trndata_file)\nradflag_file = '2016_IOP3_5min.radflag'\nradflag_path = os.path.join(template_exp_dir, radflag_file)\nradarinfo_file = 'radarinfo.dat'\nradarinfo_path = os.path.join(template_base_dir, radarinfo_file)\nblacklist_file = 'blacklist.sfc'\nblacklist_file_path = os.path.join(template_exp_dir, blacklist_file)\nremapped_radar_dir = os.path.join(project_depot_dir, '{}/remapped_radar/{}'.format(IOP_name, exp_name))\n\n# Executable file names and directories\narps_base_dir = '/home/dawson29/arps5.4_main'\narps_bin_dir = os.path.join(arps_base_dir, 'bin')\narpstrn_exe_path = os.path.join(arps_bin_dir, 'arpstrn')\narpssfc_exe_path = os.path.join(arps_bin_dir, 'arpssfc')\next2arps_exe_path = os.path.join(arps_bin_dir, 'ext2arps')\narps_exe_path = os.path.join(arps_bin_dir, 'arps_mpi')\narpsenkf_exe_path = os.path.join(arps_bin_dir, 'arpsenkf_mpi')\narpsenkfic_exe_path = os.path.join(arps_bin_dir, 'arpsenkfic')\nwrf2arps_exe_path = os.path.join(arps_bin_dir, 'wrf2arps_mpi')\narpsintrp_exe_path = os.path.join(arps_bin_dir, 'arpsintrp_mpi')\nradremap_exe_path = os.path.join(arps_bin_dir, '88d2arps')\nmpi_exe = 'mpiexec'\nmpi_nproc_flag = '-n'\n\n# Experiment parameters (many of these are namelist parameters that will be inserted in the\n# appropriate namelist input files for the various ARPS programs used in an experiment). 
See the\n# documentation in the various namelist input files for details on their meanings.\n\n# Basic experiment parameters\nnum_ensemble_members = 36\n# Initial time of entire experiment. Note, for nested ARPS runs this must be consistent with the\n# initial time of the original parent experiment!\ninitial_time = '201603311800'\ninitial_datetime = datetime.strptime(initial_time, '%Y%m%d%H%M')\n# Initial time in seconds from model start corresponding to initial_time (can be different from 0\n# if ext2arps/wrf2arps/arpsintrp is run to produce IC's for several different times)\ninitial_time_sec = 0\nperturb_ic = False\nif perturb_ic:\n external_inifile = '{}.hdf{:06d}'.format(exp_name, initial_time_sec)\n external_inigbf = '{}.hdfgrdbas'.format(exp_name)\nelse:\n external_inifile = 'ena001.hdf{:06d}'.format(initial_time_sec)\n external_inigbf = 'ena001.hdfgrdbas'\nexternal_inifile_path = os.path.join(external_icbc_dir, external_inifile)\nexternal_inigbf_path = os.path.join(external_icbc_dir, external_inigbf)\n\n# ARPS comment_lines namelist parameters\nnocmnt = 2\ncomments = ['ARPS 5.4', 'March 31st, 2016 VSE IOP3']\n\n# Grid and map projection parameters\ngrid_param = {\n 'nx': 243,\n 'ny': 243,\n 'nz': 53,\n 'nproc_x': 6,\n 'nproc_y': 5,\n 'dx': 1000.0,\n 'dy': 1000.0,\n 'dz': 400.0,\n 'strhopt': 1,\n 'dzmin': 20.0,\n 'zrefsfc': 0.0,\n 'dlayer1': 0.0,\n 'dlayer2': 1.0e5,\n 'strhtune': 0.2,\n 'zflat': 1.0e5,\n 'ctrlat': 34.799999,\n 'ctrlon': -87.680000,\n 'mapproj': 2,\n 'trulat1': 33.0,\n 'trulat2': 36.0,\n 'trulon': -87.680000,\n}\n\n# ARPSTRN parameters (note that this is set to use the 30-s terrain data. Will add hooks\n# for the other terrain data source options later)\narpstrn_param = {\n 'trndataopt': 3,\n 'dir_trndata': os.path.join(depot_base_dir, 'data/arpstopo30.data'),\n 'nsmth': 2,\n 'lat_sample': 30,\n 'lon_sample': 30,\n 'trnanxopt': 2,\n 'dirname': trndata_dir,\n 'terndmp': 3\n}\n\n# ARPSSFC parameters\narpssfc_param = {\n 'nstyp': 3,\n 'sfcdmp': 3,\n 'schmopt': 3,\n 'sdatopt': 1,\n 'fstypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/soil_1km.data'),\n 'bstypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/whsoil_1deg.data'),\n 'vdatopt': 1,\n 'fvtypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/naoge1_01l_1km.img'),\n 'bvtypfl': os.path.join(depot_base_dir, 'data/arpssfc.data/owe14d_10min.data'),\n 'ndatopt': 1,\n 'fndvifl': os.path.join(depot_base_dir, 'data/arpssfc.data/naapr92ndl_1km.img'),\n 'bndvifl': os.path.join(depot_base_dir, 'data/arpssfc.data/ndvi9004_10min.data'),\n 'vfrcopt': 1,\n 'vfrcdr': os.path.join(depot_base_dir, 'data/arpssfc.data/'),\n 'nsmthsl': 3,\n 'stypout': 1,\n 'vtypout': 1,\n 'laiout': 1,\n 'rfnsout': 1,\n 'vegout': 1,\n 'ndviout': 1,\n 'dirname': sfcdata_dir\n}\n\n# WRF2ARPS parameters\nwrf2arps_param = {\n 'run_mpi': False,\n 'nproc_x': 5,\n 'nproc_y': 2,\n 'history_interval_sec': 900,\n 'history_interval': '00_00:15:00',\n 'init_timestamp': initial_time,\n 'end_timestamp': '201604010245',\n 'subdir_template': None,\n 'hdmpfmt': 3,\n 'exbcdmp': 3,\n 'dmp_out_joined': 1111111,\n 'wrfexttrnopt': 3,\n 'terndta': trndata_path,\n 'ternfmt': 3,\n 'extntmrg': 7,\n 'dirname': external_icbc_dir\n}\n\n# ARPSINTRP parameters\narpsintrp_param = {\n}\n\n# Radar remapper parameters\nradremap_param = {\n 'radar_list': ['KBMX', 'KGWX', 'KHPX', 'KHTX', 'KNQA', 'KOHX', 'KPAH'],\n 'start_timestamp': '20160331180000',\n 'end_timestamp': '20160401030000',\n 'interval_seconds': 300,\n 'tolerance': 900,\n 'closest_before': True,\n 'nthreads': 
10\n}\n\n# EXT2ARPS parameters\next2arps_param = {\n}\n\n# ARPS parameters\n# Note that these include the comment, grid and map projection parameters already defined above\n# Also many of the parameters are shared with EXT2ARPS. So these are ones that are specific\n# to just the ARPS forward model component of the workflow. Parameters that aren't likely\n# to be changed very often but that are present in the namelist aren't included here, but can be\n# added as needed.\n\narps_param = {\n # Inifile and inigbf are only needed here for the arpsenkfic step. They are changed on the fly\n # during the actual ensemble integration to the appropriate ensemble member names\n 'nocmnt': nocmnt,\n 'cmnt(1)': comments[0],\n 'cmnt(2)': comments[1],\n 'runname': exp_name,\n 'initime': initial_datetime.strftime('%Y-%m-%d.%H:%M:00'),\n 'inifile': './{}'.format(external_inifile),\n 'inigbf': './{}'.format(external_inigbf),\n 'dtbig': 2.0,\n 'tstart': float(initial_time_sec),\n 'tstop': float(initial_time_sec),\n 'dtsml': 1.0,\n 'tintegopt': 1,\n 'tintvebd': 900, # DTD: for some reason this has to be an integer now or ARPS flips out...\n 'ngbrz': 10,\n 'brlxhw': 4,\n 'cbcdmp': 0.05,\n 'exbcfmt': 3,\n 'tmixopt': 4,\n 'trbisotp': 0,\n 'tkeopt': 3,\n 'trbvimp': 1,\n 'cfcm4h': 5.0e-4,\n 'cfcm4v': 5.0e-4,\n 'cmix_opt': 1,\n 'mphyopt': 15,\n 'sfcdtfl': sfcdata_file,\n 'sfcfmt': 3,\n 'dtsfc': 2.0,\n 'hdmpfmt': 103,\n 'thisdmp': 300.0,\n 'rfopt': 3,\n 'sv_lkup_tble': 1\n}\n\n# ARPSENKFIC parameters\narpsenkfic_param = {\n}\n\n# ARPSENKF parameters.\narpsenkf_param = {\n 'nrdrused': 1,\n 'radarname': ['KBMX', 'KGWX', 'KHPX', 'KHTX', 'KNQA', 'KOHX', 'KPAH'],\n 'ntwtype': [1, 1, 1, 1, 1, 1, 1],\n 'vcpmode': [11, 11, 11, 11, 11, 11, 11],\n 'rdrlocopt': [1, 1, 1, 1, 1, 1, 1]\n}\n\n# Parameters to generate an appropriate radflag file. Used by \"gen_radflag.py\"\nradflag_param = {\n # Add appropriate \"radar groups\" (i.e. all radars, only WSR-88Ds, only mobile, etc.)\n # And the time range for each to assimilate. Note that the gen_radflag.py script assumes\n # that there is no overlap between the times for each radar group.\n 'radar_groups': {\n 'all_radars': (arpsenkf_param['radarname'], np.arange(0., 31500. + 300., 300.))\n },\n}\n\n",
"\"\"\"\nmaster_config.py -- Contains parameters to configure an end-to-end ARPS-EnKF run\n\"\"\"\nimport os\nimport numpy as np\nfrom datetime import datetime\n\n# Define needed directories and experiment names/tags\n# Base project names and directories\nscratch_base_dir = '/scratch/rice/s/sharm261'\ndepot_base_dir = '/depot/rtanama/users/sharm261'\narpsenkftools_base_dir = '/home/sharm261/arpsEnKFtools'\nproject_dir = 'Projects/051913_OK_idealized/ARPS' # Note, removed redundant \"simulations\" subdirectory here\nproject_scr_dir = os.path.join(scratch_base_dir, project_dir)\nproject_depot_dir = os.path.join(depot_base_dir, 'data', project_dir)\nIOP_name = '' # Not needed for this experiment\nIOP_scr_dir = os.path.join(project_scr_dir, IOP_name, 'EnKF')\nIOP_depot_dir = os.path.join(project_depot_dir, IOP_name, 'EnKF')\nprep_work_dir = os.path.join(IOP_scr_dir, 'prep_work')\nsfc_obs_dir = os.path.join(depot_base_dir, 'data/Projects/051913_OK_idealized/obsdata/sao')\nradar_obs_dir = os.path.join(depot_base_dir, 'data/Projects/051913_OK_idealized/obsdata/nexrad/level2/')\n# TODO: add other obs type directories here\n\n# Experiment name and directories\nexp_name_base = '3km153x153_051913'\nexp_name_tag = '_SSEF_idealized_radarda'\nexp_name = exp_name_base + exp_name_tag\nexp_scr_dir = os.path.join(IOP_scr_dir, exp_name)\nexp_depot_dir = os.path.join(IOP_depot_dir, exp_name)\ntemplate_base_dir = os.path.join(arpsenkftools_base_dir, 'template')\ntemplate_exp_dir = os.path.join(template_base_dir, exp_name)\nexternal_icbc_dir = os.path.join(IOP_depot_dir, exp_name+'_icbc')\nsfcdata_dir = os.path.join(project_depot_dir, 'sfcdata')\nsfcdata_file = '{}.sfcdata'.format(exp_name)\nsfcdata_path = os.path.join(sfcdata_dir, sfcdata_file)\ntrndata_dir = os.path.join(project_depot_dir, 'trndata')\ntrndata_file = '{}.trndata'.format(exp_name)\ntrndata_path = os.path.join(trndata_dir, trndata_file)\nradflag_file = '2013_0519_idealized.radflag'\nradflag_path = os.path.join(template_exp_dir, radflag_file)\nradarinfo_file = 'radarinfo.dat'\nradarinfo_path = os.path.join(template_base_dir, radarinfo_file)\nblacklist_file = 'blacklist.sfc'\nblacklist_file_path = os.path.join(template_exp_dir, blacklist_file)\nremapped_radar_dir = os.path.join(project_depot_dir, 'remapped_radar/{}'.format(exp_name))\n\n# Executable file names and directories\narps_base_dir = '/home/sharm261/arps5.4'\narps_bin_dir = os.path.join(arps_base_dir, 'bin')\narpstrn_exe_path = os.path.join(arps_bin_dir, 'arpstrn')\narpssfc_exe_path = os.path.join(arps_bin_dir, 'arpssfc')\next2arps_exe_path = os.path.join(arps_bin_dir, 'ext2arps')\narps_exe_path = os.path.join(arps_bin_dir, 'arps_mpi')\narpsenkf_exe_path = os.path.join(arps_bin_dir, 'arpsenkf_mpi')\narpsenkfic_exe_path = os.path.join(arps_bin_dir, 'arpsenkfic')\nwrf2arps_exe_path = os.path.join(arps_bin_dir, 'wrf2arps_mpi')\narpsintrp_exe_path = os.path.join(arps_bin_dir, 'arpsintrp_mpi')\nradremap_exe_path = os.path.join(arps_bin_dir, '88d2arps')\nmpi_exe = 'mpiexec'\nmpi_nproc_flag = '-n'\n\n# Experiment parameters (many of these are namelist parameters that will be inserted in the\n# appropriate namelist input files for the various ARPS programs used in an experiment). See the\n# documentation in the various namelist input files for details on their meanings.\n\n# Basic experiment parameters\nnum_ensemble_members = 40\n# Initial time of entire experiment. 
Note, for nested ARPS runs this must be consistent with the\n# initial time of the original parent experiment!\ninitial_time = '201305191800'\ninitial_datetime = datetime.strptime(initial_time, '%Y%m%d%H%M')\n# Initial time in seconds from model start corresponding to initial_time (can be different from 0\n# if ext2arps/wrf2arps/arpsintrp is run to produce IC's for several different times)\ninitial_time_sec = 0\nperturb_ic = True\nif perturb_ic:\n external_inifile = '{}.hdf{:06d}'.format(exp_name, initial_time_sec)\n external_inigbf = '{}.hdfgrdbas'.format(exp_name)\nelse:\n external_inifile = 'ena001.hdf{:06d}'.format(initial_time_sec)\n external_inigbf = 'ena001.hdfgrdbas'\nexternal_inifile_path = os.path.join(external_icbc_dir, external_inifile)\nexternal_inigbf_path = os.path.join(external_icbc_dir, external_inigbf)\n\n# ARPS comment_lines namelist parameters\nnocmnt = 2\ncomments = ['ARPS 5.4', 'May 19th, 2013 OK outbreak']\n\n# Grid and map projection parameters\ngrid_param = {\n 'nx': 153,\n 'ny': 153,\n 'nz': 35,\n 'nproc_x': 3,\n 'nproc_y': 5,\n 'dx': 3000.0,\n 'dy': 3000.0,\n 'ctrlat': 35.3331,\n 'ctrlon': -97.2775,\n 'trulat1': 33.0,\n 'trulat2': 36.0,\n 'trulon': -97.2775,\n}\n\n# ARPSTRN parameters (note that this is set to use the 30-s terrain data. Will add hooks\n# for the other terrain data source options later)\narpstrn_param = {\n}\n\n# ARPSSFC parameters\narpssfc_param = {\n}\n\n# ARPSINTRP parameters\narpsintrp_param = {\n}\n\n# Radar remapper parameters\nradremap_param = {\n 'radar_list': ['KTLX'],\n 'start_timestamp': '20130519200000',\n 'end_timestamp': '20130519230000',\n 'interval_seconds': 300,\n 'tolerance': 300,\n 'closest_before': True,\n 'nthreads': 10}\n\n# EXT2ARPS parameters\next2arps_param = {\n}\n\n# ARPS parameters\n# Note that these include the comment, grid and map projection parameters already defined above\n# Also many of the parameters are shared with EXT2ARPS. So these are ones that are specific\n# to just the ARPS forward model component of the workflow. Parameters that aren't likely\n# to be changed very often but that are present in the namelist aren't included here, but can be\n# added as needed.\n\narps_param = {\n # Inifile and inigbf are only needed here for the arpsenkfic step. 
They are changed on the fly\n # during the actual ensemble integration to the appropriate ensemble member names\n 'nocmnt': nocmnt,\n 'cmnt(1)': comments[0],\n 'cmnt(2)': comments[1],\n 'runname': exp_name,\n 'initime': initial_datetime.strftime('%Y-%m-%d.%H:%M:00'),\n 'inifile': './{}'.format(external_inifile),\n 'inigbf': './{}'.format(external_inigbf),\n 'dtbig': 4.0,\n 'tstart': float(initial_time_sec),\n 'tstop': float(initial_time_sec),\n 'mphyopt': 15\n}\n\n# ARPSENKFIC parameters\narpsenkfic_param = {\n 'iniprtopt': 2,\n 'iniprt_ptprt': 2,\n 'iniprt_qv': 2,\n 'smoothopt': 2,\n 'lhor': 36000.0,\n 'lver': 7200.0,\n 'prtibgn': 3,\n 'prtiend': grid_param['nx'] - 2,\n 'prtjbgn': 3,\n 'prtjend': grid_param['ny'] - 2,\n 'prtkbgn': 3,\n 'prtkend': grid_param['nz'] - 2,\n 'prtibgnu': 3,\n 'prtiendu': grid_param['nx'] - 2,\n 'prtjbgnv': 3,\n 'prtjendv': grid_param['ny'] - 2,\n 'prtkbgnw': 3,\n 'prtkendw': grid_param['nz'] - 2,\n 'r0h_uv': 6000.0,\n 'r0v_uv': 3000.0,\n 'r0h_w': 6000.0,\n 'r0v_w': 3000.0,\n 'r0h_ptprt': 6000.0,\n 'r0v_ptprt': 3000.0,\n 'r0h_pprt': 6000.0,\n 'r0v_pprt': 3000.0,\n 'r0h_qv': 6000.0,\n 'r0v_qv': 3000.0,\n 'r0h_qli': 6000.0,\n 'r0v_qli': 3000.0,\n 'stdu': 2.0,\n 'stdv': 2.0,\n 'stdw': 0.0,\n 'stdptprt': 1.0,\n 'stdpprt': 0.0,\n 'stdqv': 0.0006,\n 'stdqrelative': 0.1,\n}\n\n# ARPSENKF parameters.\narpsenkf_param = {\n 'nrdrused': 1,\n 'radarname': ['KTLX'],\n 'ntwtype': [1,1,1,1],\n 'vcpmode': [11,11,11,11],\n 'rdrlocopt': [1,1,1,1]\n}\n\n# Parameters to generate an appropriate radflag file. Used by \"gen_radflag.py\"\nradflag_param = {\n # Add appropriate \"radar groups\" (i.e. all radars, only WSR-88Ds, only mobile, etc.)\n # And the time range for each to assimilate. Note that the gen_radflag.py script assumes\n # that there is no overlap between the times for each radar group.\n 'radar_groups': {\n 'all_radars': (arpsenkf_param['radarname'], np.arange(7200, 18000. + 300., 300.))\n },\n}\n\n"
] |
[
[
"numpy.arange"
],
[
"numpy.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
githubcstahlhut/EDoHa
|
[
"56283eac605b2b50988cc2f7ee696242eec1f34e",
"56283eac605b2b50988cc2f7ee696242eec1f34e"
] |
[
"src/main/python/TFELTrainer.py",
"src/main/python/TFELAttentionTrainer.py"
] |
[
"\nimport argparse\nimport glob\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\nfrom keras.preprocessing import sequence\nfrom tensorflow.contrib import rnn\n\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support\n\nfrom vectorizer import TokenizingEmbeddingVectorizer\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"Trains a simple BiLSTM to detect sentential arguments across multiple topics.\")\n\n parser.add_argument(\"--embeddings\", type=str, help=\"The path to the embedding folder.\")\n parser.add_argument(\"--train\", type=str, help=\"The training data file.\")\n parser.add_argument(\"--test\", type=str, help=\"The testing data file.\")\n parser.add_argument(\"--epochs\", type=int)\n parser.add_argument(\"--seed\", type=int)\n\n return parser.parse_args()\n\n\ndef read_data(data_path):\n data = pd.read_csv(data_path, sep=\",\", quotechar=\"'\", header=0)\n # data = pd.read_csv(data_path)\n return data\n\n\ndef create_model(embeddings, max_length, seed):\n\n tf.set_random_seed(seed)\n\n # Based on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py\n num_classes = 2\n dims = 100\n learning_rate = 0.001\n X_t = tf.placeholder(tf.int32, [None, max_length], name=\"topic_input\")\n X_s = tf.placeholder(tf.int32, [None, max_length], name=\"sentence_input\")\n L_t = tf.placeholder(tf.int32, [None, ], name=\"topic_length\")\n L_s = tf.placeholder(tf.int32, [None, ], name=\"sentence_length\")\n Y = tf.placeholder(tf.float32, [None, num_classes], name=\"target\")\n\n def BiRNN(x, layer):\n\n with tf.variable_scope('encoder_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n x = tf.unstack(x, max_length, 1)\n\n # Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n\n # Get lstm cell output\n outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)\n\n print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs[-1]\n\n\n topic_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"topic_embeddings\")\n topic_embedded_word_id = tf.nn.embedding_lookup(topic_word_embeddings, X_t)\n\n sentence_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"sentence_embeddings\")\n sentence_embedded_word_id = tf.nn.embedding_lookup(sentence_word_embeddings, X_s)\n\n topic_bilstm_out = BiRNN(topic_embedded_word_id, \"topic\")\n sentence_bilstm_out = BiRNN(sentence_embedded_word_id, \"sentence\") \n output = tf.concat((topic_bilstm_out, sentence_bilstm_out), axis=1)\n logits = tf.layers.dense(output, 2)\n prediction = tf.nn.softmax(logits, name=\"output\")\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op, name=\"train\")\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n return X_t, X_s, L_t, L_s, Y, prediction, train_op\n\n\nif \"__main__\"==__name__:\n\n args = parse_arguments()\n train_data = read_data(args.train)# .sample(frac=1, random_state=args.seed)[::1]\n\n train_links = train_data[[\"topic\", \"candidate\"]].values\n train_labels = train_data[\"label\"].values == \"link\"\n two_d_train_labels = np.zeros((train_labels.shape[0], 2))\n two_d_train_labels[np.where(train_labels==0), 0] = 1\n two_d_train_labels[np.where(train_labels==1), 1] = 1\n\n hypotheses = train_links[:, 0]\n sentences = train_links[:, 1]\n\n vectorizer = TokenizingEmbeddingVectorizer(args.embeddings)\n\n tokenized_hypotheses = vectorizer.tokenize_sentences(hypotheses)\n tokenized_sentences = vectorizer.tokenize_sentences(sentences)\n\n hypothesis_max_length = max(map(lambda s: len(s.split(\" \")), hypotheses))\n sentence_max_length = max(map(lambda s: len(s.split(\" \")), sentences))\n\n vectorized_hypotheses = vectorizer.sentences_to_padded_indices(hypotheses, sentence_max_length)\n vectorized_sentences = vectorizer.sentences_to_padded_indices(sentences, sentence_max_length)\n\n print(\"hypotheses.shape: \", vectorized_hypotheses.shape)\n print(\"hypotheses: \", vectorized_hypotheses)\n print(\"sentences.shape: \", vectorized_sentences.shape)\n print(\"sentences: \", vectorized_sentences)\n\n # Train model\n X_t, X_s, L_t, L_s, Y, output, train_op = create_model(vectorizer.embeddings, sentence_max_length, args.seed)\n # initialise graph\n init = tf.global_variables_initializer()\n\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n sess.run(init)\n\n batch_size = 32\n\n for i in range(args.epochs):\n # Run batches, because...\n num_batches = (len(train_data) + batch_size -1) // batch_size # Add one to get the remaining samples\n print(\"Training epoch {0}/{1} with {2} batches for {3} samples.\".format(i+1, args.epochs, num_batches, len(train_data)))\n for batch in range(num_batches):\n begin_idx = batch * batch_size\n end_idx = min((batch+1)*batch_size, len(train_data)) # make sure to not go beyond number of samples\n print(\"\\tRunning batch {0} of {1} with indices [{2}:{3}]\".format(batch, num_batches, begin_idx, end_idx))\n feed_dict = {X_t: vectorized_hypotheses[begin_idx:end_idx], X_s: vectorized_sentences[begin_idx:end_idx], Y:two_d_train_labels[begin_idx:end_idx]}\n sess.run(train_op, feed_dict=feed_dict)\n\n test_data = read_data(args.test)\n test_links = test_data[[\"topic\", \"candidate\"]].values\n test_labels = test_data[\"label\"].values == \"link\"\n\n test_hypotheses = test_links[:, 0]\n test_sentences = test_links[:, 1]\n\n test_vectorized_hypotheses = vectorizer.sentences_to_padded_indices(test_hypotheses, sentence_max_length)\n test_vectorized_sentences = vectorizer.sentences_to_padded_indices(test_sentences, sentence_max_length)\n\n\n test_feed_dict = {X_t: test_vectorized_hypotheses, X_s: test_vectorized_sentences}\n raw_preds = sess.run(output, test_feed_dict)\n preds = np.argmax(raw_preds, axis=1)\n print(preds)\n print(classification_report(test_labels, preds, target_names=[\"no-link\", \"link\"]))\n print(\"Macro: \", precision_recall_fscore_support(test_labels, preds, average=\"macro\"))\n print(\"Evidence: \", precision_recall_fscore_support(test_labels, preds, labels=[1]))\n\n\n builder = tf.saved_model.builder.SavedModelBuilder(\"hypothesisevidencelinking\")\n 
builder.add_meta_graph_and_variables(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n signature_def_map= {\n \"magic_model\": tf.saved_model.signature_def_utils.predict_signature_def({\"topic_input\": X_t, \"sentence_input\": X_s, \"target\": Y}, {\"output\": output})\n })\n builder.save()\n",
"import argparse\nimport glob\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\nfrom keras.preprocessing import sequence\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import seq2seq\n\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support\n\nfrom evidencedetection.vectorizer import TokenizingEmbeddingVectorizer\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"Trains a simple BiLSTM to detect sentential arguments across multiple topics.\")\n\n parser.add_argument(\"--embeddings\", type=str, help=\"The path to the embedding folder.\")\n parser.add_argument(\"--train\", type=str, help=\"The training data file.\")\n parser.add_argument(\"--test\", type=str, help=\"The testing data file.\")\n parser.add_argument(\"--epochs\", type=int)\n parser.add_argument(\"--seed\", type=int)\n\n return parser.parse_args()\n\n\ndef read_data(data_path):\n data = pd.read_csv(data_path, sep=\",\", quotechar=\"'\", header=0)\n # data = pd.read_csv(data_path)\n return data\n\n\ndef _mask_3d(inputs, sentence_lengths, mask_value, dimension=2):\n\n if dimension == 1:\n inputs = tf.transpose(inputs, [0, 2, 1])\n\n time_steps1 = tf.shape(inputs)[1]\n time_steps2 = tf.shape(inputs)[2]\n\n pad_values = mask_value * tf.ones_like(inputs, dtype=tf.float32)\n mask = tf.sequence_mask(sentence_lengths, time_steps2)\n\n mask_3d = tf.tile(tf.expand_dims(mask, 1), (1, time_steps1, 1))\n masked = tf.where(mask_3d, inputs, pad_values)\n\n if dimension == 1:\n masked = tf.transpose(masked, [0, 2, 1])\n return masked\n\n\ndef _atten_softmax3d(inputs):\n\n shape = tf.shape(inputs)\n num_units = shape[2]\n inputs = tf.reshape(inputs, tf.stack([-1, num_units]))\n soft_max = tf.nn.softmax(inputs)\n soft_max = tf.reshape(soft_max, shape)\n return soft_max\n\n\ndef _inter_atten(claim, sent, claim_lengths, sent_lengths):\n\n with tf.variable_scope('inter-attention') as scope:\n sent_T = tf.transpose(sent, [0, 2, 1])\n attention = tf.matmul(claim, sent_T)\n\n masked = _mask_3d(attention, sent_lengths, -np.inf)\n att_sent1 = _atten_softmax3d(masked)\n\n att_transpose = tf.transpose(attention, [0, 2, 1])\n masked = _mask_3d(att_transpose, claim_lengths, -np.inf)\n att_sent2 = _atten_softmax3d(masked)\n\n alpha = tf.matmul(att_sent2, claim, name=\"alpha\")\n # self.alpha = alpha\n beta = tf.matmul(att_sent1, sent, name=\"beta\")\n\n return alpha, beta\n # return att_sent1, att_sent2\n\n\n\ndef create_model(embeddings, hypothesis_max_length, sentence_max_length, seed):\n\n tf.set_random_seed(seed)\n\n # Based on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py\n num_classes = 2\n dims = 100\n learning_rate = 0.001\n X_t = tf.placeholder(tf.int32, [None, hypothesis_max_length], name=\"topic_input\")\n X_s = tf.placeholder(tf.int32, [None, sentence_max_length], name=\"sentence_input\")\n L_t = tf.placeholder(tf.int32, [None, ], name=\"topic_length\")\n L_s = tf.placeholder(tf.int32, [None, ], name=\"sentence_length\")\n Y = tf.placeholder(tf.float32, [None, num_classes], name=\"target\")\n\n def BiRNN(x, layer):\n\n with tf.variable_scope('encoder_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n # x = tf.unstack(x, max_length, 1)\n\n # 
Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n\n\n ((fw_outputs, bw_outputs), (fw_states, bw_states)) = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,\n lstm_bw_cell,\n x,\n dtype=tf.float32)\n outputs = tf.concat([fw_outputs, bw_outputs], axis=2)\n\n\n # print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs\n\n def BiRNNAtt(x, attention, layer) :\n with tf.variable_scope('encoder_sentence_{}'.format(layer),reuse=False):\n # Prepare data shape to match `rnn` function requirements\n # Current data input shape: (batch_size, timesteps, n_input)\n # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n\n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n # x = tf.unstack(x, max_length, 1)\n\n # Define lstm cells with tensorflow\n # Forward direction cell\n lstm_fw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n lstm_fw_att = seq2seq.AttentionWrapper(lstm_fw_cell, attention)\n # Backward direction cell\n lstm_bw_cell = rnn.BasicLSTMCell(dims, forget_bias=1.0)\n lstm_bw_att = seq2seq.AttentionWrapper(lstm_bw_cell, attention)\n\n\n ((fw_outputs, bw_outputs), (fw_states, bw_states)) = tf.nn.bidirectional_dynamic_rnn(lstm_fw_att,\n lstm_bw_att,\n x,\n dtype=tf.float32)\n outputs = tf.concat([fw_outputs, bw_outputs], axis=2)\n\n\n # print(\"BiLSTM lengths: \", len(outputs))\n # Linear activation, using rnn inner loop last output\n return outputs\n\n\n topic_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"topic_embeddings\")\n topic_embedded_word_id = tf.nn.embedding_lookup(topic_word_embeddings, X_t)\n\n sentence_word_embeddings = tf.Variable(embeddings, dtype=tf.float32, name=\"sentence_embeddings\")\n sentence_embedded_word_id = tf.nn.embedding_lookup(sentence_word_embeddings, X_s)\n\n topic_bilstm_out = BiRNN(topic_embedded_word_id, \"topic\")\n\n attention_mechanism = seq2seq.LuongAttention(100, topic_bilstm_out, L_t)\n # sentence_bilstm_out = BiRNNAtt(sentence_embedded_word_id, attention_mechanism, \"sentence\")\n sentence_bilstm_out = BiRNN(sentence_embedded_word_id, \"sentence\")\n # output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1]), axis=1)\n sentence_attention, topic_attention = _inter_atten(topic_bilstm_out, sentence_bilstm_out, L_t, L_s) # TODO CST 2019-06-28: Add sentence lengths as input\n # wheigh by attention\n topic_att_wheighted = tf.multiply(topic_bilstm_out, tf.multiply(topic_bilstm_out, topic_attention))\n sentence_att_wheighted = tf.multiply(sentence_bilstm_out, tf.multiply(sentence_bilstm_out, sentence_attention))\n # attention diff\n topic_att_diff = tf.subtract(topic_bilstm_out, topic_attention)\n sentence_att_diff = tf.subtract(sentence_bilstm_out, sentence_attention)\n # attention_output = tf.reduce_sum(tf.concat((topic_attention, sentence_attention), axis=1), axis=1)\n # attention_output = tf.reduce_sum(tf.concat((topic_attention, sentence_attention, topic_att_wheighted, sentence_att_wheighted, topic_att_diff, sentence_att_diff), axis=1), axis=1)\n attention_output = tf.reduce_sum(tf.concat((topic_att_wheighted, sentence_att_wheighted), axis=1), axis=1)\n output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1], attention_output), axis=1)\n # output = attention_output\n # output = tf.concat((topic_bilstm_out[:, -1], sentence_bilstm_out[:, -1]), 
axis=1)\n\n logits = tf.layers.dense(output, 2)\n prediction = tf.nn.softmax(logits, name=\"output\")\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op, name=\"train\")\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n return X_t, X_s, L_t, L_s, Y, prediction, train_op\n\n\nif \"__main__\"==__name__:\n\n args = parse_arguments()\n train_data = read_data(args.train).sample(frac=1)[::1]\n\n train_links = train_data[[\"topic\", \"candidate\"]].values\n train_labels = train_data[\"label\"].values == \"link\"\n # train_labels = train_data[\"label\"].values\n two_d_train_labels = np.zeros((train_labels.shape[0], 2))\n two_d_train_labels[np.where(train_labels==0), 0] = 1\n two_d_train_labels[np.where(train_labels==1), 1] = 1\n\n hypotheses = train_links[:, 0]\n sentences = train_links[:, 1]\n\n vectorizer = TokenizingEmbeddingVectorizer(args.embeddings, [\"[REF\", \"[REF]\"])\n\n tokenized_hypotheses = vectorizer.tokenize_sentences(hypotheses)\n tokenized_sentences = vectorizer.tokenize_sentences(sentences)\n\n hypothesis_lengths = list(map(lambda s: len(s.split(\" \")), hypotheses))\n hypothesis_max_length = max(hypothesis_lengths)\n sentence_lengths = list(map(lambda s: len(s.split(\" \")), sentences))\n sentence_max_length = max(sentence_lengths)\n\n vectorized_hypotheses = vectorizer.sentences_to_padded_indices(hypotheses, hypothesis_max_length, padding=\"pre\")\n vectorized_sentences = vectorizer.sentences_to_padded_indices(sentences, sentence_max_length, padding=\"pre\")\n\n print(\"hypotheses.shape: \", vectorized_hypotheses.shape)\n print(\"hypotheses: \", vectorized_hypotheses)\n print(\"sentences.shape: \", vectorized_sentences.shape)\n print(\"sentences: \", vectorized_sentences)\n\n\n # Train model\n X_t, X_s, L_t, L_s, Y, output, train_op = create_model(vectorizer.embeddings, hypothesis_max_length, sentence_max_length, args.seed)\n # initialise graph\n init = tf.global_variables_initializer()\n\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n # sess = tf.Session(graph=tf.get_default_graph())\n sess.run(init)\n\n batch_size = 32\n\n for i in range(args.epochs):\n # Run batches, because...\n num_batches = (len(train_data) + batch_size -1) // batch_size # Add one to get the remaining samples\n print(\"Training epoch {0}/{1} with {2} batches for {3} samples.\".format(i+1, args.epochs, num_batches, len(train_data)))\n for batch in range(num_batches):\n begin_idx = batch * batch_size\n end_idx = min((batch+1)*batch_size, len(train_data)) # make sure to not go beyond number of samples\n print(\"\\tRunning batch {0} of {1} with indices [{2}:{3}]\".format(batch, num_batches, begin_idx, end_idx))\n feed_dict = {X_t: vectorized_hypotheses[begin_idx:end_idx],\n X_s: vectorized_sentences[begin_idx:end_idx],\n L_t: hypothesis_lengths[begin_idx:end_idx],\n L_s: sentence_lengths[begin_idx:end_idx],\n Y:two_d_train_labels[begin_idx:end_idx]}\n sess.run(train_op, feed_dict=feed_dict)\n\n test_data = read_data(args.test)\n test_links = test_data[[\"topic\", \"candidate\"]].values\n test_labels = test_data[\"label\"].values == \"link\"\n # test_labels = test_data[\"label\"].values\n\n test_hypotheses = test_links[:, 0]\n test_sentences = 
test_links[:, 1]\n\n test_hypothesis_lengths = list(map(lambda s: len(s.split(\" \")), test_hypotheses))\n test_sentence_lengths = list(map(lambda s: len(s.split(\" \")), test_sentences))\n\n test_vectorized_hypotheses = vectorizer.sentences_to_padded_indices(test_hypotheses, hypothesis_max_length, padding=\"pre\")\n test_vectorized_sentences = vectorizer.sentences_to_padded_indices(test_sentences, sentence_max_length, padding=\"pre\")\n\n\n test_feed_dict = {X_t: test_vectorized_hypotheses, X_s: test_vectorized_sentences, L_t: test_hypothesis_lengths, L_s: test_sentence_lengths}\n raw_preds = sess.run(output, test_feed_dict)\n preds = np.argmax(raw_preds, axis=1)\n print(preds)\n print(classification_report(test_labels, preds, target_names=[\"no-link\", \"link\"]))\n print(\"Macro: \", precision_recall_fscore_support(test_labels, preds, average=\"macro\"))\n\n\n builder = tf.saved_model.builder.SavedModelBuilder(\"evidencelinking-en-trainable\")\n builder.add_meta_graph_and_variables(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n signature_def_map= {\n \"magic_model\": tf.saved_model.signature_def_utils.predict_signature_def({\"topic_input\": X_t, \"sentence_input\": X_s, \"target\": Y}, {\"output\": output})\n })\n builder.save()\n"
] |
[
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"sklearn.metrics.precision_recall_fscore_support",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"sklearn.metrics.classification_report",
"numpy.where",
"pandas.read_csv",
"tensorflow.Variable",
"tensorflow.layers.dense",
"tensorflow.ConfigProto",
"numpy.argmax",
"numpy.zeros",
"tensorflow.unstack",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.rnn.static_bidirectional_rnn",
"tensorflow.set_random_seed",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.saved_model.signature_def_utils.predict_signature_def"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.nn.bidirectional_dynamic_rnn",
"sklearn.metrics.precision_recall_fscore_support",
"tensorflow.where",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"sklearn.metrics.classification_report",
"numpy.where",
"pandas.read_csv",
"tensorflow.contrib.seq2seq.LuongAttention",
"tensorflow.Variable",
"tensorflow.layers.dense",
"tensorflow.subtract",
"tensorflow.ConfigProto",
"numpy.argmax",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tensorflow.set_random_seed",
"tensorflow.sequence_mask",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.multiply",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.saved_model.signature_def_utils.predict_signature_def"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
TortoiseHam/fastestimator
|
[
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65",
"6061a4fbbeb62a2194ef82ba8017f651710d0c65"
] |
[
"fastestimator/trace/metric/recall.py",
"fastestimator/trace/xai/grad_cam.py",
"apphub/anomaly_detection/alocc/alocc_torch.py",
"fastestimator/op/numpyop/univariate/rua.py"
] |
[
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Any, Dict, Union, Iterable\n\nimport numpy as np\nfrom sklearn.metrics import recall_score\n\nfrom fastestimator.trace.meta.per_ds import per_ds\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util.data import Data\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import to_number\n\n\n@per_ds\n@traceable()\nclass Recall(Trace):\n \"\"\"Compute recall for a classification task and report it back to the logger.\n\n Args:\n true_key: Name of the key that corresponds to ground truth in the batch dictionary.\n pred_key: Name of the key that corresponds to predicted score in the batch dictionary.\n mode: What mode(s) to execute this Trace in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n output_name: Name of the key to store to the state.\n per_ds: Whether to automatically compute this metric individually for every ds_id it runs on, in addition to\n computing an aggregate across all ds_ids on which it runs. 
This is automatically False if `output_name`\n contains a \"|\" character.\n **kwargs: Additional keyword arguments that pass to sklearn.metrics.recall_score()\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\", \"average\"] argument exists in `kwargs`.\n \"\"\"\n def __init__(self,\n true_key: str,\n pred_key: str,\n mode: Union[None, str, Iterable[str]] = (\"eval\", \"test\"),\n ds_id: Union[None, str, Iterable[str]] = None,\n output_name: str = \"recall\",\n per_ds: bool = True,\n **kwargs) -> None:\n Recall.check_kwargs(kwargs)\n super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode, ds_id=ds_id)\n self.binary_classification = None\n self.y_true = []\n self.y_pred = []\n self.kwargs = kwargs\n self.per_ds = per_ds\n\n @property\n def true_key(self) -> str:\n return self.inputs[0]\n\n @property\n def pred_key(self) -> str:\n return self.inputs[1]\n\n def on_epoch_begin(self, data: Data) -> None:\n self.y_true = []\n self.y_pred = []\n\n def on_batch_end(self, data: Data) -> None:\n y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])\n self.binary_classification = y_pred.shape[-1] == 1\n if y_true.shape[-1] > 1 and y_true.ndim > 1:\n y_true = np.argmax(y_true, axis=-1)\n if y_pred.shape[-1] > 1:\n y_pred = np.argmax(y_pred, axis=-1)\n else:\n y_pred = np.round(y_pred)\n assert y_pred.size == y_true.size\n self.y_pred.extend(y_pred.ravel())\n self.y_true.extend(y_true.ravel())\n\n def on_epoch_end(self, data: Data) -> None:\n if self.binary_classification:\n score = recall_score(self.y_true, self.y_pred, average='binary', **self.kwargs)\n else:\n score = recall_score(self.y_true, self.y_pred, average=None, **self.kwargs)\n data.write_with_log(self.outputs[0], score)\n\n @staticmethod\n def check_kwargs(kwargs: Dict[str, Any]) -> None:\n \"\"\"Check if `kwargs` has any blacklist argument and raise an error if it does.\n\n Args:\n kwargs: Keywork arguments to be examined.\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\", \"average\"] argument exists in `kwargs`.\n \"\"\"\n blacklist = [\"y_true\", \"y_pred\", \"average\"]\n illegal_kwarg = [x for x in blacklist if x in kwargs]\n if illegal_kwarg:\n raise ValueError(\n f\"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in\"\n \" sklearn.metrics.recall_score()\")\n",
"# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Any, Dict, Iterable, Optional, TypeVar, Union\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\nfrom fastestimator.backend.argmax import argmax\nfrom fastestimator.backend.concat import concat\nfrom fastestimator.backend.get_image_dims import get_image_dims\nfrom fastestimator.backend.reduce_max import reduce_max\nfrom fastestimator.backend.squeeze import squeeze\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util.data import Data\nfrom fastestimator.util.img_data import ImgData\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import to_number\n\nTensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)\n\n\n@traceable()\nclass GradCAM(Trace):\n \"\"\"A trace which draws GradCAM heatmaps on top of images.\n\n These are useful for visualizing supports for a model's classification. See https://arxiv.org/pdf/1610.02391.pdf\n for more details.\n\n Args:\n images: The key corresponding to images onto which to draw the CAM outputs.\n grads: The key corresponding to gradients of the model output with respect to a convolution layer of the model.\n You can easily extract these from any model by using the 'intermediate_layers' variable in a ModelOp, along\n with the GradientOp. Make sure to select a particular component of y_pred when computing gradients rather\n than using the entire vector. See our GradCAM XAI tutorial for an example.\n n_components: How many principal components to visualize.\n n_samples: How many images in total to display every epoch (or None to display all available images).\n labels: The key corresponding to the true labels of the images to be visualized.\n preds: The key corresponding to the model prediction for each image.\n label_mapping: {class_string: model_output_value}.\n outputs: The key into which to write the eigencam images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n \"\"\"\n def __init__(self,\n images: str,\n grads: str,\n n_components: int = 3,\n n_samples: Optional[int] = 5,\n labels: Optional[str] = None,\n preds: Optional[str] = None,\n label_mapping: Optional[Dict[str, Any]] = None,\n outputs: str = \"gradcam\",\n mode: Union[None, str, Iterable[str]] = \"!train\",\n ds_id: Union[None, str, Iterable[str]] = None):\n self.image_key = images\n self.grad_key = grads\n self.true_label_key = labels\n self.pred_label_key = preds\n inputs = [x for x in (images, grads, labels, preds) if x is not None]\n self.n_components = n_components\n self.n_samples = n_samples\n # TODO - handle non-hashable labels\n self.label_mapping = {val: key for key, val in label_mapping.items()} if label_mapping else None\n super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)\n self.images = []\n self.grads = []\n self.labels = []\n self.preds = []\n self.n_found = 0\n\n def _reset(self) -> None:\n \"\"\"Clear memory for next epoch.\n \"\"\"\n self.images = []\n self.grads = []\n self.labels = []\n self.preds = []\n self.n_found = 0\n\n def on_batch_end(self, data: Data) -> None:\n if self.n_samples is None or self.n_found < self.n_samples:\n self.images.append(data[self.image_key])\n self.grads.append(data[self.grad_key])\n if self.true_label_key:\n self.labels.append(data[self.true_label_key])\n if self.pred_label_key:\n self.preds.append(data[self.pred_label_key])\n self.n_found += len(data[self.image_key])\n\n def on_epoch_end(self, data: Data) -> None:\n # Keep only the user-specified number of samples\n images = concat(self.images)[:self.n_samples or self.n_found]\n _, height, width = get_image_dims(images)\n grads = to_number(concat(self.grads)[:self.n_samples or self.n_found])\n if tf.is_tensor(images):\n grads = np.moveaxis(grads, source=-1, destination=1) # grads should be channel first\n args = {}\n labels = None if not self.labels else concat(self.labels)[:self.n_samples or self.n_found]\n if labels is not None:\n if len(labels.shape) > 1:\n labels = argmax(labels, axis=-1)\n if self.label_mapping:\n labels = np.array([self.label_mapping[clazz] for clazz in to_number(squeeze(labels))])\n args[self.true_label_key] = labels\n preds = None if not self.preds else concat(self.preds)[:self.n_samples or self.n_found]\n if preds is not None:\n if len(preds.shape) > 1:\n preds = argmax(preds, axis=-1)\n if self.label_mapping:\n preds = np.array([self.label_mapping[clazz] for clazz in to_number(squeeze(preds))])\n args[self.pred_label_key] = preds\n args[self.image_key] = images\n # Clear memory\n self._reset()\n # Make the image\n # TODO: In future maybe allow multiple different grads to have side-by-side comparisons of classes\n components = [np.mean(grads, axis=1)]\n components = [np.maximum(component, 0) for component in components]\n masks = []\n for component_batch in components:\n img_batch = []\n for img in component_batch:\n img = cv2.resize(img, (height, width))\n img = img - np.min(img)\n img = img / np.max(img)\n img = cv2.cvtColor(cv2.applyColorMap(np.uint8(255 * img), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)\n img = np.float32(img) / 255\n img_batch.append(img)\n img_batch = np.array(img_batch, dtype=np.float32)\n # Switch to channel first for pytorch\n if isinstance(images, torch.Tensor):\n img_batch = np.moveaxis(img_batch, source=-1, destination=1)\n masks.append(img_batch)\n\n components = [images + mask for mask in masks] # This seems to work 
even if the image is 1 channel instead of 3\n components = [image / reduce_max(image) for image in components]\n\n for elem in components:\n args[self.grad_key] = elem\n\n result = ImgData(**args)\n data.write_without_log(self.outputs[0], result)\n",
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import auc, f1_score, roc_curve\nfrom torch.nn.init import normal_\n\nimport fastestimator as fe\nfrom fastestimator.backend import binary_crossentropy\nfrom fastestimator.op.numpyop import LambdaOp\nfrom fastestimator.op.numpyop.univariate import ChannelTranspose, ExpandDims, Normalize\nfrom fastestimator.op.tensorop import TensorOp\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace import Trace\nfrom fastestimator.trace.io import BestModelSaver\nfrom fastestimator.util import to_number\n\n\nclass reconstructor(nn.Module):\n def __init__(self):\n super().__init__()\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 32, 5, stride=2, padding=2), # (self, in_channels, out_channels, kernel_size, stride=1,\n nn.BatchNorm2d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(32, 64, 5, stride=2, padding=2),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(64, 128, 5, stride=2, padding=2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.BatchNorm2d(128),\n )\n self.decoder = nn.Sequential(nn.ConvTranspose2d(128, 32, 5, stride=2, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 16, 5, stride=2, padding=2, output_padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(True),\n nn.ConvTranspose2d(16, 1, 5, stride=2, padding=2, output_padding=1),\n nn.Tanh())\n\n for layer in self.encoder:\n if isinstance(layer, nn.Conv2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n for layer in self.decoder:\n if isinstance(layer, nn.ConvTranspose2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n self.layers = nn.Sequential(nn.Conv2d(1, 16, 5, stride=2, padding=2),\n nn.BatchNorm2d(16),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(16, 32, 5, stride=2, padding=2),\n nn.BatchNorm2d(32),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(32, 64, 5, stride=2, padding=2),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(64, 128, 5, stride=2, padding=2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n Flatten(),\n nn.Linear(512, 1),\n nn.Sigmoid())\n\n for layer in self.layers:\n if isinstance(layer, nn.Conv2d):\n normal_(layer.weight.data, mean=0, std=0.02)\n\n def forward(self, x):\n x = self.layers(x)\n return x\n\n\nclass RLoss(TensorOp):\n def __init__(self, alpha=0.2, inputs=None, outputs=None, mode=None):\n super().__init__(inputs, outputs, mode)\n self.alpha = alpha\n\n def 
forward(self, data, state):\n fake_score, x_fake, x = data\n recon_loss = binary_crossentropy(y_true=x, y_pred=x_fake, from_logits=True)\n adv_loss = binary_crossentropy(y_pred=fake_score, y_true=torch.ones_like(fake_score), from_logits=True)\n return adv_loss + self.alpha * recon_loss\n\n\nclass DLoss(TensorOp):\n def forward(self, data, state):\n true_score, fake_score = data\n real_loss = binary_crossentropy(y_pred=true_score, y_true=torch.ones_like(true_score), from_logits=True)\n fake_loss = binary_crossentropy(y_pred=fake_score, y_true=torch.zeros_like(fake_score), from_logits=True)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\nclass F1AUCScores(Trace):\n \"\"\"Computes F1-Score and AUC Score for a classification task and reports it back to the logger.\n \"\"\"\n def __init__(self, true_key, pred_key, mode=(\"eval\", \"test\"), output_name=(\"auc_score\", \"f1_score\")):\n super().__init__(inputs=(true_key, pred_key), outputs=output_name, mode=mode)\n self.y_true = []\n self.y_pred = []\n\n @property\n def true_key(self):\n return self.inputs[0]\n\n @property\n def pred_key(self):\n return self.inputs[1]\n\n def on_epoch_begin(self, data):\n self.y_true = []\n self.y_pred = []\n\n def on_batch_end(self, data):\n y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])\n assert y_pred.size == y_true.size\n self.y_pred.extend(y_pred.ravel())\n self.y_true.extend(y_true.ravel())\n\n def on_epoch_end(self, data):\n fpr, tpr, thresholds = roc_curve(self.y_true, self.y_pred, pos_label=1) # (y, score, positive_label)\n roc_auc = auc(fpr, tpr)\n eer_threshold = thresholds[np.nanargmin(np.absolute((1 - tpr - fpr)))]\n y_pred_class = np.copy(self.y_pred)\n y_pred_class[y_pred_class >= eer_threshold] = 1\n y_pred_class[y_pred_class < eer_threshold] = 0\n f_score = f1_score(self.y_true, y_pred_class, pos_label=0)\n\n data.write_with_log(self.outputs[0], roc_auc)\n data.write_with_log(self.outputs[1], f_score)\n\n\ndef get_estimator(epochs=20, batch_size=128, max_train_steps_per_epoch=None, save_dir=tempfile.mkdtemp()):\n # Dataset Creation\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()\n x_eval0, y_eval0 = x_eval[np.where((y_eval == 1))], np.ones(y_eval[np.where((y_eval == 1))].shape)\n x_eval1, y_eval1 = x_eval[np.where((y_eval != 1))], y_eval[np.where((y_eval != 1))]\n\n # Ensuring outliers comprise 50% of the dataset\n index = np.random.choice(x_eval1.shape[0], int(x_eval0.shape[0]), replace=False)\n x_eval1, y_eval1 = x_eval1[index], np.zeros(y_eval1[index].shape)\n\n x_train, y_train = x_train[np.where((y_train == 1))], np.zeros(y_train[np.where((y_train == 1))].shape)\n train_data = fe.dataset.NumpyDataset({\"x\": x_train, \"y\": y_train})\n\n x_eval, y_eval = np.concatenate([x_eval0, x_eval1]), np.concatenate([y_eval0, y_eval1])\n eval_data = fe.dataset.NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n ExpandDims(inputs=\"x\", outputs=\"x\"),\n Normalize(inputs=\"x\", outputs=\"x\", mean=1.0, std=1.0, max_pixel_value=127.5),\n LambdaOp(fn=lambda x: x + np.random.normal(loc=0.0, scale=0.155, size=(28, 28, 1)).astype(np.float32),\n inputs=\"x\",\n outputs=\"x_w_noise\",\n mode=\"train\"),\n ChannelTranspose(inputs=\"x\", outputs=\"x\"),\n ChannelTranspose(inputs=\"x_w_noise\", outputs=\"x_w_noise\", mode=\"train\")\n ])\n\n recon_model = fe.build(model_fn=reconstructor,\n optimizer_fn=lambda x: torch.optim.RMSprop(x, 
lr=2e-4),\n model_name=\"reconstructor\")\n disc_model = fe.build(model_fn=discriminator,\n optimizer_fn=lambda x: torch.optim.RMSprop(x, lr=1e-4),\n model_name=\"discriminator\")\n\n network = fe.Network(ops=[\n ModelOp(model=recon_model, inputs=\"x_w_noise\", outputs=\"x_fake\", mode=\"train\"),\n ModelOp(model=recon_model, inputs=\"x\", outputs=\"x_fake\", mode=\"eval\"),\n ModelOp(model=disc_model, inputs=\"x_fake\", outputs=\"fake_score\"),\n ModelOp(model=disc_model, inputs=\"x\", outputs=\"true_score\"),\n RLoss(inputs=(\"fake_score\", \"x_fake\", \"x\"), outputs=\"rloss\"),\n UpdateOp(model=recon_model, loss_name=\"rloss\"),\n DLoss(inputs=(\"true_score\", \"fake_score\"), outputs=\"dloss\"),\n UpdateOp(model=disc_model, loss_name=\"dloss\"),\n ])\n\n traces = [\n F1AUCScores(true_key=\"y\", pred_key=\"fake_score\", mode=\"eval\", output_name=[\"auc_score\", \"f1_score\"]),\n BestModelSaver(model=recon_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),\n BestModelSaver(model=disc_model, save_dir=save_dir, metric='f1_score', save_best_mode='max'),\n ]\n\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n max_train_steps_per_epoch=max_train_steps_per_epoch,\n log_steps=50)\n\n return estimator\n\n\nif __name__ == \"__main__\":\n est = get_estimator()\n est.fit()\n",
"# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport inspect\nimport random\nfrom typing import Any, Dict, Iterable, List, Tuple, Union\n\nimport numpy as np\nfrom PIL import Image, ImageOps\n\nfrom fastestimator.op.numpyop.meta.one_of import OneOf\nfrom fastestimator.op.numpyop.meta.sometimes import Sometimes\nfrom fastestimator.op.numpyop.numpyop import NumpyOp, forward_numpyop\nfrom fastestimator.op.numpyop.univariate.autocontrast import AutoContrast\nfrom fastestimator.op.numpyop.univariate.brightness import Brightness\nfrom fastestimator.op.numpyop.univariate.color import Color\nfrom fastestimator.op.numpyop.univariate.contrast import Contrast\nfrom fastestimator.op.numpyop.univariate.posterize import Posterize as PosterizeAug\nfrom fastestimator.op.numpyop.univariate.sharpness import Sharpness\nfrom fastestimator.op.numpyop.univariate.shear_x import ShearX\nfrom fastestimator.op.numpyop.univariate.shear_y import ShearY\nfrom fastestimator.op.numpyop.univariate.translate_x import TranslateX\nfrom fastestimator.op.numpyop.univariate.translate_y import TranslateY\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import param_to_range, to_list, to_set\n\n\n@traceable()\nclass Rotate(NumpyOp):\n \"\"\"Rotate the input by an angle selected randomly.\n\n This is a wrapper for functionality provided by the PIL library:\n https://github.com/python-pillow/Pillow/tree/master/src/PIL.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n limit: Range from which the angle can be picked. 
If limit is a single int the range is considered from\n (0, limit).\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n limit: Union[int, Tuple[int, int]] = 30):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n self.limit = param_to_range(limit)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n param_mid = (self.limit[1] + self.limit[0]) / 2\n param_extent = magnitude_coef * ((self.limit[1] - self.limit[0]) / 2)\n self.limit = (param_mid - param_extent, param_mid + param_extent)\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n degree = random.uniform(self.limit[0], self.limit[1])\n return [Rotate._apply_rotate(elem, degree) for elem in data]\n\n @staticmethod\n def _apply_rotate(data: np.ndarray, degree: float) -> np.ndarray:\n \"\"\"Rotate the image.\n\n Args:\n data: The image to be modified.\n degree: Angle for image rotation.\n\n Returns:\n The image after applying rotation.\n \"\"\"\n im = Image.fromarray(data)\n im = im.rotate(degree)\n return np.array(im)\n\n\n@traceable()\nclass Identity(NumpyOp):\n \"\"\"Pass the input as-is.\n\n Args:\n inputs: Key(s) of images.\n outputs: Key(s) into which to write the images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"A method which will be invoked by the RUA Op to adjust the augmentation intensity.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n\n\n@traceable()\nclass Equalize(NumpyOp):\n \"\"\"Equalize the image histogram.\n\n This is a wrapper for functionality provided by the PIL library:\n https://github.com/python-pillow/Pillow/tree/master/src/PIL.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"A method which will be invoked by the RUA Op to adjust the augmentation intensity.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n return [Equalize._apply_equalize(elem) for elem in data]\n\n @staticmethod\n def _apply_equalize(data: np.ndarray) -> np.ndarray:\n \"\"\"Equalize the image histogram.\n\n Args:\n data: The image to be modified.\n\n Returns:\n The image after applying equalize.\n \"\"\"\n im = Image.fromarray(data)\n im = ImageOps.equalize(im)\n return np.array(im)\n\n\n@traceable()\nclass Posterize(PosterizeAug):\n \"\"\"Reduce the number of bits for the image.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n num_bits: Number of high bits. If num_bits is a single value, the range will be [num_bits, num_bits]. A triplet\n of ints will be interpreted as [r, g, b], and a triplet of pairs as [[r1, r1], [g1, g2], [b1, b2]]. 
Must be\n in the range [0, 8].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n num_bits: Union[int,\n Tuple[int, int],\n Tuple[int, int, int],\n Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]] = 7):\n self.num_bits = num_bits\n super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id, num_bits=num_bits)\n\n def set_rua_level(self, magnitude_coef: float) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n if isinstance(self.num_bits, tuple) and len(self.num_bits) == 3:\n num_bits = []\n for i in self.num_bits:\n num_bits.append(Posterize._range_tuple(num_bits=i, magnitude_coef=magnitude_coef))\n self.num_bits = tuple(num_bits)\n else:\n self.num_bits = Posterize._range_tuple(num_bits=self.num_bits, magnitude_coef=magnitude_coef)\n super().__init__(inputs=self.inputs,\n outputs=self.outputs,\n mode=self.mode,\n ds_id=self.ds_id,\n num_bits=self.num_bits)\n\n @staticmethod\n def _range_tuple(num_bits: Union[int, Tuple[int, int]], magnitude_coef: float) -> Tuple[int, int]:\n \"\"\"Process num_bits for posterization based on augmentation intensity.\n\n Args:\n num_bits: Number of high bits.\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n\n Returns:\n The range of high bits after adjusting augmentation intensity.\n \"\"\"\n if isinstance(num_bits, tuple):\n param_mid = (num_bits[0] + num_bits[1])/2\n param_extent = magnitude_coef * ((num_bits[1] - num_bits[0])/2)\n bits_range = (round(param_mid - param_extent), round(param_mid + param_extent))\n else:\n bits_range = (round(8-(magnitude_coef*num_bits)), 8)\n return bits_range\n\n\n@traceable()\nclass Solarize(NumpyOp):\n \"\"\"Invert all pixel values above a threshold.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n threshold: Range for the solarizing threshold. 
If threshold is a single value 't', the range will be [0, t].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n threshold: Union[int, Tuple[int, int], float, Tuple[float, float]] = 256):\n super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)\n self.threshold = threshold\n\n def set_rua_level(self, magnitude_coef: Union[int, float]) -> None:\n \"\"\"Set the augmentation intensity based on the magnitude_coef.\n\n This method is specifically designed to be invoked by the RUA Op.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n \"\"\"\n if isinstance(self.threshold, tuple):\n self.threshold = magnitude_coef * (self.threshold[1] - self.threshold[0]) + self.threshold[0]\n else:\n self.threshold = magnitude_coef * self.threshold\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n if isinstance(self.threshold, tuple):\n threshold = 256 - round(random.uniform(self.threshold[0], self.threshold[1]))\n else:\n threshold = 256 - round(random.uniform(0, self.threshold))\n return [Solarize._apply_solarize(elem, threshold) for elem in data]\n\n @staticmethod\n def _apply_solarize(data: np.ndarray, threshold: int) -> np.ndarray:\n \"\"\"Invert all pixel values of the image above a threshold.\n\n Args:\n data: The image to be modified.\n threshold: Solarizing threshold.\n\n Returns:\n The image after applying solarize.\n \"\"\"\n data = np.where(data < threshold, data, 255 - data)\n return data\n\n\n@traceable()\nclass OneOfMultiVar(OneOf):\n \"\"\"Perform one of several possible NumpyOps.\n\n Note that OneOfMultiVar accepts both univariate and multivariate ops and allows the list of passed NumpyOps to have\n different input and output keys. OneOfMultiVar should not be used to wrap an op whose output key(s) do not already\n exist in the data dictionary. This would result in a problem when future ops / traces attempt to reference the\n output key, but OneOfMultiVar declined to generate it. 
If you want to create a default value for a new key, simply\n use a LambdaOp before invoking the OneOfMultiVar.\n\n Args:\n *numpy_ops: A list of ops to choose between with uniform probability.\n \"\"\"\n def __init__(self, *numpy_ops: NumpyOp) -> None:\n inputs = to_set(numpy_ops[0].inputs)\n outputs = to_set(numpy_ops[0].outputs)\n mode = numpy_ops[0].mode\n ds_id = numpy_ops[0].ds_id\n self.in_list = numpy_ops[0].in_list\n self.out_list = numpy_ops[0].out_list\n for op in numpy_ops[1:]:\n assert self.in_list == op.in_list, \"All ops within OneOf must share the same input configuration\"\n assert self.out_list == op.out_list, \"All ops within OneOf must share the same output configuration\"\n assert mode == op.mode, \"All ops within a OneOf must share the same mode\"\n\n for inp in op.inputs:\n inputs.add(inp)\n\n for out in op.outputs:\n outputs.add(out)\n\n # Bypassing OneOf Op's restriction of same input and output key(s) on the list of passed NumpyOps.\n super(OneOf, self).__init__(inputs=inputs.union(outputs), outputs=outputs, mode=mode, ds_id=ds_id)\n self.ops = numpy_ops\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n data = {key: elem for key, elem in zip(self.inputs, data)}\n forward_numpyop([random.choice(self.ops)], data, state)\n return [data[key] for key in self.outputs]\n\n\n@traceable()\nclass RUA(NumpyOp):\n \"\"\"Apply RUA augmentation strategy.\n\n Note that all augmentation ops passed to RUA should have a set_rua_level method to modify their strength based on\n the level. Custom NumpyOps can be passed to the `choices` argument along with names of augmentations to add. Passing\n 'defaults' adds the default list of augmentations along with any custom NumpyOps specified by the user.\n The default augmentations are: 'Rotate', 'Identity', 'AutoContrast', 'Equalize', 'Posterize', 'Solarize',\n 'Sharpness', 'Contrast', 'Color', 'Brightness', 'ShearX', 'ShearY', 'TranslateX' and 'TranslateY'.\n To add specific augmentations from the default list, their names can be passed. Ex: 'Rotate'.\n To remove specific augmentations from the list, you can negate their names. Ex: '!Rotate' will load all the\n augmentations except 'Rotate'.\n\n Example combinations which are not allowed:\n choices = ['defaults', 'Rotate'] # augmentations from the default list are redundant with 'defaults'.\n choices = ['defaults', '!Rotate'] # negated augmentations automatically load the default list.\n choices = ['!Solarize', 'Rotate'] # Cannot mix negated and normal augmentations.\n\n RUA should not have augmentation ops whose output key(s) do not already exist in the data dictionary. This would\n result in a problem when future ops / traces attempt to reference the output key, but RUA declined to generate it.\n If you want to create a default value for a new key, simply use a LambdaOp before invoking RUA.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n choices: List of augmentations to apply.\n level: Factor to set the range for magnitude of augmentation. 
Must be in the range [0, 30].\n\n Image types:\n uint8\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n choices: Union[str, NumpyOp, List[Union[str, NumpyOp]]] = \"defaults\",\n level: Union[int, float] = 18):\n self.default_aug_dict = {\n \"Rotate\": Rotate(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=90),\n \"Identity\": Identity(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"AutoContrast\": AutoContrast(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"Equalize\": Equalize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id),\n \"Posterize\": Posterize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,num_bits=7),\n \"Solarize\": Solarize(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,threshold=256),\n \"Sharpness\": Sharpness(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Contrast\": Contrast(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Color\": Color(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"Brightness\": Brightness(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,limit=0.9),\n \"ShearX\": ShearX(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shear_coef=0.5),\n \"ShearY\": ShearY(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shear_coef=0.5),\n \"TranslateX\": TranslateX(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shift_limit=0.33),\n \"TranslateY\": TranslateY(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id,shift_limit=0.33)\n }\n aug_options = self._parse_aug_choices(magnitude_coef=(level / 30.), choices=to_list(choices))\n\n inputs, outputs = to_set(inputs), to_set(outputs)\n for op in aug_options:\n for inp in op.inputs:\n inputs.add(inp)\n\n for out in op.outputs:\n outputs.add(out)\n super().__init__(inputs=inputs.union(outputs), outputs=outputs, mode=mode, ds_id=ds_id)\n\n # Calculating number of augmentation to apply at each training iteration\n N_min = 1\n N_max = min(len(aug_options), 5)\n N = level * (N_max - N_min) / 30 + N_min\n N_guarantee, N_p = int(N), N % 1\n\n self.ops = [OneOfMultiVar(*aug_options) for _ in range(N_guarantee)]\n if N_p > 0:\n self.ops.append(Sometimes(OneOfMultiVar(*aug_options), prob=N_p))\n\n def _parse_aug_choices(self, magnitude_coef: float, choices: List[Union[str, NumpyOp]]) -> List[NumpyOp]:\n \"\"\"Parse the augmentation choices to determine the final list of augmentations to apply.\n\n Args:\n magnitude_coef: The desired augmentation intensity (range [0-1]).\n choices: List of augmentations to apply.\n\n Returns:\n List of augmentations to apply.\n\n Raises:\n AssertionError: If augmentations to add and remove are mixed.\n AttributeError: If augmentation choices don't have a 'set_rua_level' method.\n ValueError: If 'defaults' is provided with augmentation strings to add or remove, or wrong names are\n provided.\n \"\"\"\n custom_ops = [op for op in choices if not isinstance(op, str)]\n remove_ops = [op for op in choices if isinstance(op, str) and op.startswith(\"!\")]\n add_ops = [op for op in choices if isinstance(op, str) and not (op.startswith(\"!\") or (op == \"defaults\"))]\n aug_names = list(self.default_aug_dict.keys())\n\n assert len(remove_ops)==0 or len(add_ops)==0, \\\n \"RUA supports either add or remove ops, but not both. 
Found {} and {}\".format(add_ops, remove_ops)\n\n if len(remove_ops) > 0:\n if \"defaults\" in choices:\n raise ValueError(\"Can't provide 'defaults' value with ops to remove, found: {}\".format(remove_ops))\n remove_ops = [op[1:] for op in remove_ops]\n\n for op in remove_ops:\n if op not in aug_names:\n raise ValueError(\"Unable to remove {}, list of augmentations available: {}\".format(op, aug_names))\n\n aug_list = [aug for aug_name, aug in self.default_aug_dict.items() if aug_name not in remove_ops]\n else:\n if \"defaults\" in choices:\n if len(add_ops) > 0:\n raise ValueError(\"Can't pass 'defaults' value with default list's ops, found: {}\".format(add_ops))\n aug_list = list(self.default_aug_dict.values())\n elif len(add_ops) > 0:\n for op in add_ops:\n if op not in aug_names:\n raise ValueError(\"Unable to add {}, list of augmentations available: {}\".format(op, aug_names))\n\n aug_list = [self.default_aug_dict[aug_name] for aug_name in add_ops]\n else:\n aug_list = []\n aug_list = aug_list + custom_ops\n\n for op in aug_list:\n if hasattr(op, \"set_rua_level\") and inspect.ismethod(getattr(op, \"set_rua_level\")):\n op.set_rua_level(magnitude_coef=magnitude_coef)\n else:\n raise AttributeError(\n \"RUA Augmentations should have a 'set_rua_level' method but it's not present in Op: {}\".format(\n op.__class__.__name__))\n\n return aug_list\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n data = {key: elem for key, elem in zip(self.inputs, data)}\n forward_numpyop(self.ops, data, state)\n return [data[key] for key in self.outputs]\n"
] |
[
[
"numpy.round",
"numpy.argmax",
"sklearn.metrics.recall_score"
],
[
"tensorflow.is_tensor",
"numpy.maximum",
"numpy.min",
"numpy.uint8",
"numpy.max",
"numpy.mean",
"numpy.float32",
"numpy.moveaxis",
"numpy.array"
],
[
"numpy.concatenate",
"sklearn.metrics.f1_score",
"numpy.where",
"torch.nn.Sigmoid",
"numpy.copy",
"numpy.zeros",
"torch.ones_like",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.zeros_like",
"sklearn.metrics.roc_curve",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"sklearn.metrics.auc",
"numpy.absolute",
"tensorflow.keras.datasets.mnist.load_data",
"torch.nn.Tanh",
"torch.optim.RMSprop",
"numpy.random.normal",
"torch.nn.ReLU"
],
[
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.4",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
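The row above pairs the FastEstimator scripts with the exact framework calls they make (torch.nn.Conv2d, sklearn.metrics.roc_curve, tensorflow.keras.datasets.mnist.load_data, and so on) and with the TensorFlow versions recorded as possible for each file. A minimal sketch of how such a row might be consumed downstream, assuming a JSON-lines export whose field names mirror the columns shown here (repo_name, file_path, code, an API list per file, a per-library version map per file); those field names and the index_rows_by_api helper are illustrative assumptions, not a documented schema:

import json
from collections import defaultdict

def index_rows_by_api(jsonl_path):
    """Map each fully qualified API name (e.g. 'sklearn.metrics.roc_curve')
    to the (repo_name, file_path) pairs that call it.

    Assumes one JSON object per line with 'repo_name' (str), 'file_path'
    (list of str) and 'apis' (list of lists, parallel to 'file_path');
    these field names are assumptions based on the columns above.
    """
    index = defaultdict(list)
    with open(jsonl_path) as fh:
        for line in fh:
            row = json.loads(line)
            for path, api_names in zip(row["file_path"], row["apis"]):
                for name in api_names:
                    index[name].append((row["repo_name"], path))
    return index

# Usage sketch: list every file in the export that calls roc_curve.
# index = index_rows_by_api("rows.jsonl")
# print(index["sklearn.metrics.roc_curve"])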
als11044/trimesh
|
[
"a29735c47cf6a473ba77fdf8be0d3f6fd104c9fc",
"a29735c47cf6a473ba77fdf8be0d3f6fd104c9fc",
"a29735c47cf6a473ba77fdf8be0d3f6fd104c9fc"
] |
[
"trimesh/geometry.py",
"trimesh/path/simplify.py",
"trimesh/comparison.py"
] |
[
"import numpy as np\n\nfrom .transformations import rotation_matrix\nfrom .constants import tol, log\n\nfrom . import util\n\ntry:\n from scipy.sparse import coo_matrix\nexcept ImportError:\n log.warning('scipy.sparse.coo_matrix unavailable')\n\n\ndef plane_transform(origin, normal):\n '''\n Given the origin and normal of a plane, find the transform that will move\n that plane to be coplanar with the XY plane\n\n Parameters\n ----------\n origin: (3,) float, point in space\n normal: (3,) float, plane normal vector\n\n Returns\n ---------\n transform: (4,4) float, transformation matrix\n '''\n transform = align_vectors(normal, [0, 0, 1])\n transform[0:3, 3] = -np.dot(transform, np.append(origin, 1))[0:3]\n return transform\n\n\ndef transform_around(matrix, point):\n '''\n Given a transformation matrix, apply its rotation component around a\n point in space.\n\n Parameters\n ----------\n matrix: (4,4) float, transformation matrix\n point: (3,) float, point in space\n\n Returns\n ---------\n result: (4,4) transformation matrix\n '''\n point = np.array(point)\n translate = np.eye(4)\n translate[0:3, 3] = -point\n result = np.dot(matrix, translate)\n translate[0:3, 3] = point\n result = np.dot(translate, result)\n return result\n\n\ndef align_vectors(vector_start, vector_end, return_angle=False):\n '''\n Returns the 4x4 transformation matrix which will rotate from\n vector_start to vector_end, eg:\n\n vector_end == np.dot(T, np.append(vector_start, 1))[0:3]\n\n\n Parameters\n -----------\n vector_start: (3,) float, vector in space\n vector_end: (3,) float, vector in space\n return_angle: bool, return angle between vectors or not\n\n Returns\n -----------\n transform: (4,4) float, transformation matrix\n angle: float, angle in radians (only returned if flag set)\n\n '''\n start = np.asanyarray(vector_start, dtype=np.float64)\n start /= np.linalg.norm(start)\n end = np.asanyarray(vector_end, dtype=np.float64)\n end /= np.linalg.norm(end)\n\n cross = np.cross(start, end)\n # we clip the norm to 1, as otherwise floating point bs\n # can cause the arcsin to error\n norm = np.linalg.norm(cross)\n norm = np.clip(norm, -1.0, 1.0)\n direction = np.sign(np.dot(start, end))\n\n if norm < tol.zero:\n # if the norm is zero, the vectors are the same\n # and no rotation is needed\n T = np.eye(4)\n T[0:3] *= direction\n else:\n angle = np.arcsin(norm)\n if direction < 0:\n angle = np.pi - angle\n T = rotation_matrix(angle, cross)\n\n check = np.abs(np.dot(T[:3, :3], start) - end)\n if not (check < 1e-5).all():\n raise ValueError('aligning vectors failed!')\n\n if return_angle:\n return T, angle\n return T\n\n\ndef faces_to_edges(faces, return_index=False):\n '''\n Given a list of faces (n,3), return a list of edges (n*3,2)\n\n Parameters\n -----------\n faces: (n,3) int, vertex indices representing faces\n\n Returns\n -----------\n edges: (n*3, 2) int, vertex indices representing edges\n '''\n faces = np.asanyarray(faces)\n edges = np.column_stack((faces[:, (0, 1)],\n faces[:, (1, 2)],\n faces[:, (2, 0)])).reshape(-1, 2)\n if return_index:\n face_index = np.tile(np.arange(len(faces)), (3, 1)).T.reshape(-1)\n return edges, face_index\n return edges\n\n\ndef vector_angle(pairs):\n '''\n Find the angles between vector pairs\n\n Parameters\n ----------\n pairs: (n,2,3) set of vector pairs\n\n Returns\n ----------\n angles: (n,) float, angles between vectors\n\n Examples\n ----------\n angles = mesh.face_normals[mesh.face_adjacency]\n '''\n pairs = np.asanyarray(pairs)\n if not util.is_shape(pairs, (-1, 2, 3)):\n 
raise ValueError('pairs must be (n,2,3)!')\n dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1])\n # clip for floating point error\n dots = np.clip(dots, -1.0, 1.0)\n angles = np.abs(np.arccos(dots))\n return angles\n\n\ndef triangulate_quads(quads):\n '''\n Given a set of quad faces, return them as triangle faces.\n\n Parameters\n -----------\n quads: (n,4) int, vertex indices of quad faces\n\n Returns\n -----------\n faces: (m,3) int, vertex indices of triangular faces\n '''\n if len(quads) == 0:\n return quads\n quads = np.asanyarray(quads)\n faces = np.vstack((quads[:, [0, 1, 2]],\n quads[:, [2, 3, 0]]))\n return faces\n\n\ndef mean_vertex_normals(vertex_count, faces, face_normals, **kwargs):\n '''\n Find vertex normals from the mean of the faces that contain that vertex.\n\n Parameters\n -----------\n vertex_count: int, the number of vertices faces refer to\n faces: (n,3) int, list of vertex indices\n face_normals: (n,3) float, normal vector for each face\n\n Returns\n -----------\n vertex_normals: (vertex_count, 3) float normals for every vertex\n Uncontained vertices will be zero.\n '''\n def summed_sparse():\n # use a sparse matrix of which face contains each vertex to\n # figure out the summed normal at each vertex\n # allow cached sparse matrix to be passed\n if 'sparse' in kwargs:\n sparse = kwargs['sparse']\n else:\n sparse = index_sparse(vertex_count, faces)\n summed = sparse.dot(face_normals)\n log.debug('Generated vertex normals using sparse matrix')\n return summed\n\n def summed_loop():\n # loop through every face, in tests was ~50x slower than\n # doing this with a sparse matrix\n summed = np.zeros((vertex_count, 3))\n for face, normal in zip(faces, face_normals):\n summed[face] += normal\n return summed\n\n try:\n summed = summed_sparse()\n except BaseException:\n log.warning('Unable to generate sparse matrix! 
Falling back!',\n exc_info=True)\n summed = summed_loop()\n unit_normals, valid = util.unitize(summed, check_valid=True)\n vertex_normals = np.zeros((vertex_count, 3), dtype=np.float64)\n vertex_normals[valid] = unit_normals\n\n return vertex_normals\n\n\ndef index_sparse(column_count, indices):\n '''\n Return a sparse matrix for which vertices are contained in which faces.\n\n Returns\n ---------\n sparse: scipy.sparse.coo_matrix of shape (column_count, len(faces))\n dtype is boolean\n\n Examples\n ----------\n In [1]: sparse = faces_sparse(len(mesh.vertices), mesh.faces)\n\n In [2]: sparse.shape\n Out[2]: (12, 20)\n\n In [3]: mesh.faces.shape\n Out[3]: (20, 3)\n\n In [4]: mesh.vertices.shape\n Out[4]: (12, 3)\n\n In [5]: dense = sparse.toarray().astype(int)\n\n In [6]: dense\n Out[6]:\n array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]])\n\n In [7]: dense.sum(axis=0)\n Out[7]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])\n '''\n indices = np.asanyarray(indices)\n column_count = int(column_count)\n\n row = indices.reshape(-1)\n col = np.tile(np.arange(len(indices)).reshape(\n (-1, 1)), (1, indices.shape[1])).reshape(-1)\n\n shape = (column_count, len(indices))\n data = np.ones(len(col), dtype=np.bool)\n sparse = coo_matrix((data, (row, col)),\n shape=shape,\n dtype=np.bool)\n return sparse\n\n\ndef medial_axis(samples, contains):\n '''\n Given a set of samples on a boundary, find the approximate medial axis based\n on a voronoi diagram and a containment function which can assess whether\n a point is inside or outside of the closed geometry.\n\n Parameters\n ----------\n samples: (n,d) set of points on the boundary of the geometry\n contains: function which takes (m,d) points and returns an (m) bool array\n\n Returns\n ----------\n lines: (n,2,2) set of line segments\n '''\n\n from scipy.spatial import Voronoi\n from .path.io.load import load_path\n\n # create the voronoi diagram, after vertically stacking the points\n # deque from a sequnce into a clean (m,2) array\n voronoi = Voronoi(samples)\n # which voronoi vertices are contained inside the original polygon\n contained = contains(voronoi.vertices)\n # ridge vertices of -1 are outside, make sure they are False\n contained = np.append(contained, False)\n inside = [i for i in voronoi.ridge_vertices if contained[i].all()]\n line_indices = np.vstack([util.stack_lines(i)\n for i in inside if len(i) >= 2])\n lines = voronoi.vertices[line_indices]\n return load_path(lines)\n",
"import numpy as np\n\nimport copy\nimport collections\n\nfrom . import arc\nfrom . import entities\n\nfrom ..nsphere import fit_nsphere\nfrom ..util import unitize, diagonal_dot\nfrom ..constants import log\nfrom ..constants import tol_path as tol\n\n\ndef fit_circle_check(points, scale, prior=None, final=False, verbose=False):\n '''\n Fit a circle, and reject the fit if:\n * the radius is larger than tol.radius_min*scale or tol.radius_max*scale\n * any segment spans more than tol.seg_angle\n * any segment is longer than tol.seg_frac*scale\n * the fit deviates by more than tol.radius_frac*radius\n * the segments on the ends deviate from tangent by more than tol.tangent\n\n Parameters\n ---------\n points: (n, d) set of points which represent a path\n prior: (center, radius) tuple for best guess, or None if unknown\n scale: float, what is the overall scale of the set of points\n verbose: boolean, if True output log.debug messages for the reasons\n for fit rejection. Potentially generates hundreds of thousands of\n messages so only suggested in manual debugging.\n\n Returns\n ---------\n if fit is acceptable:\n (center, radius) tuple\n else:\n None\n '''\n # an arc needs at least three points\n if len(points) < 3:\n return None\n\n # do a least squares fit on the points\n C, R, r_deviation = fit_nsphere(points, prior=prior)\n\n # check to make sure radius is between min and max allowed\n if not tol.radius_min < (R / scale) < tol.radius_max:\n if verbose:\n log.debug('circle fit error: R %f', R / scale)\n return None\n\n # check point radius error\n r_error = r_deviation / R\n if r_error > tol.radius_frac:\n if verbose:\n log.debug('circle fit error: fit %s', str(r_error))\n return None\n\n vectors = np.diff(points, axis=0)\n segment = np.linalg.norm(vectors, axis=1)\n\n # approximate angle in radians, segments are linear length\n # not arc length but this is close and avoids a cosine\n angle = segment / R\n\n if (angle > tol.seg_angle).any():\n if verbose:\n log.debug('circle fit error: angle %s', str(angle))\n return None\n\n if final and (angle > tol.seg_angle_min).sum() < 3:\n log.debug('final: angle %s', str(angle))\n return None\n\n # check segment length as a fraction of drawing scale\n scaled = segment / scale\n\n if (scaled > tol.seg_frac).any():\n if verbose:\n log.debug('circle fit error: segment %s', str(scaled))\n return None\n\n # check to make sure the line segments on the ends are actually\n # tangent with the candidate circle fit\n mid_pt = points[[0, -2]] + (vectors[[0, -1]] * .5)\n radial = unitize(mid_pt - C)\n ends = unitize(vectors[[0, -1]])\n tangent = np.abs(np.arccos(diagonal_dot(radial, ends)))\n tangent = np.abs(tangent - np.pi / 2).max()\n if tangent > tol.tangent:\n if verbose:\n log.debug('circle fit error: tangent %f',\n np.degrees(tangent))\n return None\n\n return (C, R)\n\n\ndef is_circle(points, scale, verbose=False):\n '''\n Given a set of points, quickly determine if they represent\n a circle or not.\n '''\n\n # make sure input is a numpy array\n points = np.asanyarray(points)\n scale = float(scale)\n\n # can only be a circle if the first and last point are the\n # same (AKA is a closed path)\n if np.linalg.norm(points[0] - points[-1]) > tol.merge:\n return None\n\n box = points.ptp(axis=0)\n # the bounding box size of the points\n # check aspect ratio as an early exit if the path is not a circle\n aspect = np.divide(*box)\n if np.abs(aspect - 1.0) > tol.aspect_frac:\n return None\n\n # fit a circle with tolerance checks\n CR = fit_circle_check(points, 
scale=scale)\n if CR is None:\n return None\n\n # return the circle as three control points\n control = arc.angles_to_threepoint([0, np.pi * .5], *CR)\n return control\n\n\ndef merge_colinear(points, scale=None):\n '''\n Given a set of points representing a path in space,\n merge points which are colinear.\n\n Parameters\n ----------\n points: (n, d) set of points (where d is dimension)\n scale: float, scale of drawing\n\n Returns\n ----------\n merged: (j, d) set of points with colinear and duplicate\n points merged, where (j < n)\n '''\n points = np.array(points)\n if scale is None:\n scale = np.ptp(points, axis=0).max()\n\n # the vector from one point to the next\n direction = points[1:] - points[:-1]\n # the length of the direction vector\n direction_norm = np.linalg.norm(direction, axis=1)\n # make sure points don't have zero length\n direction_ok = direction_norm > tol.merge\n\n # remove duplicate points\n points = np.vstack((points[0], points[1:][direction_ok]))\n direction = direction[direction_ok]\n direction_norm = direction_norm[direction_ok]\n\n # create a vector between every other point, then turn it perpendicular\n # if we have points A B C D\n # and direction vectors A-B, B-C, etc\n # these will be perpendicular to the vectors A-C, B-D, etc\n perpendicular = (points[2:] - points[:-2]).T[::-1].T\n perpendicular /= np.linalg.norm(perpendicular, axis=1).reshape((-1, 1))\n\n # find the projection of each direction vector\n # onto the perpendicular vector\n projection = np.abs(diagonal_dot(perpendicular, direction[:-1]))\n\n projection_ratio = np.max((projection / direction_norm[1:],\n projection / direction_norm[:-1]), axis=0)\n\n mask = np.ones(len(points), dtype=np.bool)\n # since we took diff, we need to offset by one\n mask[1:-1][projection_ratio < 1e-4 * scale] = False\n\n merged = points[mask]\n return merged\n\n\ndef resample_spline(points, smooth=.001, count=None, degree=3):\n '''\n Resample a path in space, smoothing along a b-spline.\n\n Parameters\n -----------\n points: (n, dimension) float, points in space\n smooth: float, smoothing amount\n count: number of samples in output\n degree: int, degree of spline polynomial\n\n Returns\n ---------\n resampled: (count, dimension) float, points in space\n '''\n from scipy.interpolate import splprep, splev\n if count is None:\n count = len(points)\n points = np.asanyarray(points)\n closed = np.linalg.norm(points[0] - points[-1]) < tol.merge\n\n tpl = splprep(points.T, s=smooth, k=degree)[0]\n i = np.linspace(0.0, 1.0, count)\n resampled = np.column_stack(splev(i, tpl))\n\n if closed:\n shared = resampled[[0, -1]].mean(axis=0)\n resampled[0] = shared\n resampled[-1] = shared\n\n return resampled\n\n\ndef points_to_spline_entity(points, smooth=.0005, count=None):\n '''\n Create a spline entity from a curve in space\n\n Parameters\n -----------\n points: (n, dimension) float, points in space\n smooth: float, smoothing amount\n count: int, number of samples in result\n\n Returns\n ---------\n entity: entities.BSpline object with points indexed at zero\n control: (m, dimension) float, new vertices for entity\n '''\n\n from scipy.interpolate import splprep\n if count is None:\n count = len(points)\n points = np.asanyarray(points)\n closed = np.linalg.norm(points[0] - points[-1]) < tol.merge\n\n knots, control, degree = splprep(points.T, s=smooth)[0]\n control = np.transpose(control)\n index = np.arange(len(control))\n\n if closed:\n control[0] = control[[0, -1]].mean(axis=0)\n control = control[:-1]\n index[-1] = index[0]\n\n 
entity = entities.BSpline(points=index,\n knots=knots,\n closed=closed)\n\n return entity, control\n\n\ndef three_point(indices):\n '''\n Given a long list of ordered indices,\n return the first, middle and last.\n\n Parameters\n -----------\n indices: (n,) array\n\n Returns\n ----------\n three: (3,) array\n '''\n three = [indices[0],\n indices[int(len(indices) / 2)],\n indices[-1]]\n return np.array(three)\n\n\ndef simplify_basic(drawing):\n '''\n Merge colinear segments and fit circles.\n\n Parameters\n -----------\n drawing: Path2D object\n\n Returns\n -----------\n simplified: Path2D with circles.\n '''\n\n if any(i.__class__.__name__ != 'Line' for i in drawing.entities):\n log.debug('Path contains non- linear entities, skipping')\n return drawing\n\n # we are going to do a bookkeeping to avoid having\n # to recompute literally everything when simplification is ran\n cache = copy.deepcopy(drawing._cache)\n\n # store new values\n vertices_new = collections.deque()\n entities_new = collections.deque()\n\n for polygon in drawing.polygons_closed:\n\n # clean up things like self intersections\n buffered = polygon.buffer(0.0)\n # get the exterior as an (n,2) array\n # since we generated these from the closed\n points = merge_colinear(np.array(buffered.exterior.coords),\n scale=drawing.scale)\n # check to see if the closed entity represents a circle\n circle = is_circle(points,\n scale=drawing.scale)\n\n if circle is not None:\n # the points are circular enough for our high standards\n # so replace them with a closed Arc entity\n entities_new.append(entities.Arc(points=np.arange(3) +\n len(vertices_new),\n closed=True))\n vertices_new.extend(circle)\n else:\n # save this path as a closed Line entity\n # we cleaned up colinear points so it will still\n # be simpler than the source data\n indexes = np.arange(len(points)) + len(vertices_new)\n entities_new.append(entities.Line(points=indexes))\n vertices_new.extend(points)\n\n # create the new drawing object\n simplified = type(drawing)(entities=entities_new,\n vertices=vertices_new)\n\n # we have changed every path to a single closed entity\n # either a closed arc, or a closed line\n # therefore all closed paths are now represented by a single entity\n cache.cache.update({'paths': np.arange(len(entities_new)).reshape((-1, 1)),\n 'path_valid': np.ones(len(entities_new), dtype=np.bool),\n 'dangling': np.array([])})\n simplified._cache = cache\n # set the cache ID so it won't dump when a value is requested\n simplified._cache.id_set()\n\n return simplified\n",
"import numpy as np\n\nfrom . import util\n\nfrom .constants import tol\n\n\n# how many signifigant figures to use for each field of the identifier\nidentifier_sigfig = (5, # mesh volume, pretty stable\n 3, # mesh area\n 1, # ratio of original mesh volume to convex hull volume\n 6, # euler number of mesh- topological integer\n 2) # 99.99 percentile vertex radius\n\n\ndef identifier_simple(mesh):\n '''\n Return a basic identifier for a mesh, consisting of properties\n that are somewhat robust to transformation and noise.\n\n These include:\n -volume\n -surface area\n -convex hull surface area\n -euler number\n -average radius\n\n Parameters\n ----------\n mesh: Trimesh object\n\n Returns\n ----------\n identifier: (5,) float, properties of mesh\n '''\n identifier = np.array([mesh.volume,\n mesh.area,\n mesh.volume / mesh.convex_hull.volume,\n mesh.euler_number,\n 0.0],\n dtype=np.float64)\n\n if mesh.is_watertight:\n origin = mesh.center_mass\n else:\n origin = mesh.centroid\n vertex_radii = ((mesh.vertices - origin) ** 2).sum(axis=1)\n identifier[-1] = np.percentile(vertex_radii, 99.99)\n\n return identifier\n\n\ndef identifier_hash(identifier, sigfig=None):\n '''\n Hash an identifier array to a specified number of signifigant figures.\n\n Parameters\n ----------\n identifier: (n,) float\n sigfig: (n,) int\n\n Returns\n ----------\n md5: str, MD5 hash of identifier\n '''\n if sigfig is None:\n sigfig = identifier_sigfig\n as_int, multiplier = util.sigfig_int(identifier, sigfig)\n if (multiplier < 0).any():\n multiplier += np.abs(multiplier.min())\n hashable = (as_int * (10 ** multiplier)).astype(np.int64)\n md5 = util.md5_object(hashable)\n return md5\n"
] |
[
[
"numpy.dot",
"scipy.sparse.coo_matrix",
"scipy.spatial.Voronoi",
"numpy.clip",
"numpy.arcsin",
"numpy.eye",
"numpy.linalg.norm",
"numpy.arccos",
"numpy.append",
"numpy.asanyarray",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.column_stack",
"numpy.vstack"
],
[
"numpy.abs",
"numpy.linspace",
"numpy.arange",
"numpy.vstack",
"scipy.interpolate.splprep",
"numpy.degrees",
"numpy.linalg.norm",
"numpy.ptp",
"scipy.interpolate.splev",
"numpy.max",
"numpy.asanyarray",
"numpy.diff",
"numpy.transpose",
"numpy.array",
"numpy.divide"
],
[
"numpy.array",
"numpy.percentile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
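The per-library version maps attached to each file (for instance the scipy versions recorded in the middle entry above, apparently for trimesh/path/simplify.py, next to empty lists for the other two files) suggest a straightforward compatibility check. A sketch under the same assumed field layout; treating an empty list as "no recorded constraint" is an interpretation on my part, not something the table states:

def is_compatible(version_map, library, installed_version):
    """Return True when installed_version's major.minor appears in the
    recorded versions for `library`, or when no versions are recorded
    at all (read here as 'unconstrained' -- an assumption).
    """
    allowed = version_map.get(library, [])
    if not allowed:
        return True
    major_minor = ".".join(installed_version.split(".")[:2])
    return major_minor in allowed

# Example against a subset of the scipy versions listed above:
simplify_versions = {"scipy": ["0.13", "1.6", "0.14", "1.10", "0.15", "1.4"],
                     "numpy": []}
print(is_compatible(simplify_versions, "scipy", "1.4.1"))   # True
print(is_compatible(simplify_versions, "scipy", "1.11.0"))  # False
print(is_compatible(simplify_versions, "numpy", "1.24.0"))  # True (no constraint recorded)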
Bondify/gtfs_functions
|
[
"4cd237fe5d326219428018ff0cd58152bceadf73"
] |
[
"build/lib/gtfs_functions/gtfs_funtions.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 10 15:20:33 2020\r\n@author: santi\r\n\"\"\"\r\n\r\ndef save_gdf(data, file_name, geojson=False, shapefile=True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import zipfile\r\n import os\r\n \r\n geojson_path = file_name + '.geojson'\r\n shape_path = file_name + '.shp'\r\n zip_path = file_name + '.zip'\r\n\r\n # -------------------------------------------------------\r\n # ----------- Save geojson (it's lighter) ---------------\r\n # -------------------------------------------------------\r\n if geojson:\r\n data.to_file(\r\n filename = geojson_path, \r\n driver=\"GeoJSON\"\r\n )\r\n\r\n # -------------------------------------------------------\r\n # ----------------- Save shapefile ----------------------\r\n # -------------------------------------------------------\r\n if shapefile:\r\n data.to_file(\r\n driver = 'ESRI Shapefile',\r\n filename = shape_path,\r\n )\r\n # create the .prj file\r\n prj_name = file_name + '.prj'\r\n prj = open(prj_name, \"w\")\r\n \r\n prj_write = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]]'\r\n # call the function and supply the epsg code\r\n prj.write(prj_write)\r\n prj.close()\r\n \r\n if shapefile:\r\n extensions = ['.cpg', '.dbf','.prj', '.shp', '.shx']\r\n \r\n zipObj = zipfile.ZipFile(zip_path, 'w')\r\n \r\n for ex in extensions:\r\n zipObj.write(file_name + ex) \r\n os.remove(file_name + ex) # in case I want to remove the files out of the shapefile\r\n \r\n zipObj.close()\r\n \r\n \r\ndef import_gtfs(gtfs_path, busiest_date = True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n import zipfile\r\n\r\n try:\r\n import partridge as ptg \r\n except ImportError as e:\r\n os.system('pip install partridge')\r\n import partridge as ptg\r\n # Partridge to read the feed\r\n # service_ids = pd.read_csv(gtfs_path + '/trips.txt')['service_id'].unique()\r\n # service_ids = frozenset(tuple(service_ids))\r\n \r\n if busiest_date:\r\n service_ids = ptg.read_busiest_date(gtfs_path)[1]\r\n else:\r\n with zipfile.ZipFile(gtfs_path) as myzip:\r\n myzip.extract(\"trips.txt\")\r\n service_ids = pd.read_csv('trips.txt')['service_id'].unique()\r\n service_ids = frozenset(tuple(service_ids))\r\n os.remove('trips.txt')\r\n \r\n view = {'trips.txt': {'service_id': service_ids}}\r\n \r\n feed = ptg.load_geo_feed(gtfs_path, view)\r\n \r\n routes = feed.routes\r\n trips = feed.trips\r\n stop_times = feed.stop_times\r\n stops = feed.stops\r\n shapes = feed.shapes\r\n \r\n # Get routes info in trips\r\n trips = pd.merge(trips, routes, how='left').loc[:, ['trip_id', 'route_id',\r\n 'service_id', 'direction_id','shape_id']]\r\n \r\n # Get trips, routes and stops info in stop_times\r\n stop_times = pd.merge(stop_times, trips, how='left') \r\n stop_times = pd.merge(stop_times, stops, how='left')\r\n \r\n return routes, stops, stop_times, trips, shapes\r\n\r\ndef cut_gtfs(stop_times, stops, shapes):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n#--------------------------------------------------------\r\n os.system('apt install libspatialindex-dev')\r\n os.system('pip install rtree')\r\n#----------------------------------------------------------\r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n try:\r\n import 
utm\r\n except ImportError as e:\r\n os.system('pip install utm')\r\n import utm\r\n\r\n from shapely.ops import nearest_points\r\n from shapely.geometry import Point, LineString, MultiLineString, MultiPoint\r\n from shapely.ops import split\r\n from shapely import geometry, ops\r\n\r\n # Get the right epsg code for later conversations\r\n shapes.crs = {'init':'epsg:4326'}\r\n\r\n lat = shapes.geometry.iloc[0].coords[0][1]\r\n lon = shapes.geometry.iloc[0].coords[0][0]\r\n\r\n zone = utm.from_latlon(lat, lon)\r\n\r\n def code(zone):\r\n #The EPSG code is 32600+zone for positive latitudes and 32700+zone for negatives.\r\n if lat <0:\r\n epsg_code = 32700 + zone[2]\r\n else:\r\n epsg_code = 32600 + zone[2]\r\n return epsg_code\r\n\r\n epsg = code(zone)\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- FIND THE CLOSEST POINT TO EACH LINE --------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------ \r\n\r\n # Data frame with stop sequence for route and direction\r\n sseq = stop_times.drop_duplicates(subset=['stop_id','stop_name', 'stop_sequence', 'shape_id'])[['route_id','direction_id','stop_id','stop_name', 'stop_sequence', 'shape_id']]\r\n\r\n # Data frames with the number of stops for each route and direction and shape_id\r\n route_shapes = sseq.pivot_table('stop_id',\r\n index = ['route_id', 'direction_id', 'shape_id'],\r\n aggfunc='count').reset_index()\r\n route_shapes.columns = ['route_id','direction_id', 'shape_id', 'stops_count']\r\n\r\n # List of shape_ids\r\n shape_id_list = shapes.shape_id.unique()\r\n\r\n # Create a DataFrame with the pair (stop, nearest_point) for each shape_id\r\n def find_shape_closest_points(shape_id):\r\n #shape_id = row.shape_id\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Look for the shape\r\n shape = shapes.loc[shapes.shape_id == shape_id,'geometry'].values[0]\r\n\r\n\r\n # Look for the stop_ids of this shape\r\n route_stop_ids = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n &(sseq['shape_id'] == shape_id)]\r\n\r\n # Look for the geometry of these stops\r\n # merged = pd.merge(route_stop_ids, stops, how='left')\r\n # route_stop_geom = merged.geometry\r\n route_stop_geom = pd.merge(route_stop_ids, stops, how='left').geometry\r\n\r\n # Look for the nearest points of these stops that are in the shape\r\n points_in_shape = route_stop_geom.apply(lambda x: nearest_points(x, shape))\r\n\r\n d = dict(shape_id=shape_id, points=list(points_in_shape))\r\n\r\n return d\r\n\r\n shape_closest_points = [find_shape_closest_points(s) for s in shape_id_list]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- CREATE LINES THAT CUT THE SHAPE ------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # 
------------------------------------------------------------------------------\r\n\r\n shape_trans_lines = pd.DataFrame()\r\n # First we define a function that will help us create the line to intersect the shape\r\n\r\n # ---------------- THIS IS THE VALUE YOU SHOULD CHANGE IF THE CUTTING GEOMETRY AND ---\r\n # ---------------- THE LINE INTERSECT -------------------------------------------------\r\n offset = 0.0001\r\n\r\n def create_line(row):\r\n # Formula to make the line longer\r\n # a = (y1-b)/x1\r\n # b = (y2-x2/x1*y1)/(1-x2/x1)\r\n if row[0] == row[1]:\r\n x1 = row[0].x - offset\r\n y1 = row[0].y - offset\r\n\r\n x2 = row[0].x \r\n y2 = row[0].y\r\n\r\n x3 = row[0].x + offset\r\n y3 = row[0].y + offset\r\n\r\n else: \r\n x1 = row[0].x\r\n y1 = row[0].y\r\n\r\n x2 = row[1].x\r\n y2 = row[1].y\r\n\r\n # If x2==x1 it will give the error \"ZeroDivisionError\"\r\n if float(x2) != float(x1):\r\n b = (y2-x2/x1*y1)/(1-x2/x1)\r\n a = (y1-b)/x1\r\n\r\n if x2 - x1 < 0: # We should create an \"if\" to check if we need to do -1 or +1 depending on x2-x1\r\n x3 = x2 - 3*(x1 - x2)#offset\r\n else:\r\n x3 = x2 + 3*(x2 - x1)#offset\r\n\r\n y3 = a*x3 + b\r\n\r\n else:\r\n x3 = x2\r\n b = 0\r\n a = 0\r\n\r\n if y2-y1 < 0:\r\n #y3 = y2 - offset/5\r\n y3 = y2 - 3*(y1-y2) #offset/10000000\r\n else: \r\n #y3 = y2 + offset/5\r\n y3 = y2 + 3*(y2-y1) #offset/10000000\r\n\r\n trans = LineString([Point(x1,y1), Point(x2,y2), Point(x3, y3)])\r\n return trans\r\n\r\n # For each shape we need to create transversal lines and separete the shape in segments \r\n def find_shape_trans_lines(shape_closest_points):\r\n # Choose the shape\r\n shape_id = shape_closest_points['shape_id']\r\n\r\n # Choose the pair (stop, nearest point to shape) to create the line\r\n scp = shape_closest_points['points']\r\n\r\n lines = [create_line(p) for p in scp]\r\n # scp.apply(create_line)\r\n\r\n d = dict(shape_id=shape_id, trans_lines=lines)\r\n\r\n return d\r\n\r\n shape_trans_lines = [find_shape_trans_lines(shape_closest_points[i]) for i in range(0, len(shape_closest_points))]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------ CUT THE SHAPES --------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # Set the tolerance of the cuts\r\n tolerance = 0.0001\r\n\r\n loops_route_id = []\r\n loops_direction_id = []\r\n loops_shape_id = []\r\n\r\n def cut_shapes_(shape_trans_lines, shape_closest_points):\r\n shape_id = shape_trans_lines['shape_id']\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Check if the line is simple (ie, doesn't intersect itself)\r\n line = shapes.loc[shapes.shape_id == shape_id, 'geometry'].values[0]\r\n if line.is_simple:\r\n # Split the shape in different segments\r\n trans_lines = shape_trans_lines['trans_lines']\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n\r\n #df['segment'] = ''\r\n\r\n 
d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n if len(trans_lines) == 2:\r\n # In case there is a line with only two stops\r\n d['segment'] = [line]\r\n return d\r\n\r\n else:\r\n # trans_lines_all = MultiLineString(list(trans_lines.values))\r\n # trans_lines_cut = MultiLineString(list(trans_lines.values)[1:-1])\r\n\r\n # # Split the shape in different segments, cut by the linestrings created before\r\n # # The result is a geometry collection with the segments of the route\r\n # result = split(line, trans_lines_cut)\r\n try:\r\n trans_lines_all = MultiLineString(trans_lines)\r\n trans_lines_cut = MultiLineString(trans_lines[1:-1])\r\n\r\n # Split the shape in different segments, cut by the linestrings created before\r\n # The result is a geometry collection with the segments of the route\r\n result = split(line, trans_lines_cut)\r\n except ValueError:\r\n # If the cut points are on the line then try to cut with the points instead of lines\r\n test = shape_closest_points['points']\r\n cut_points = [test[i][1] for i in range(len(test))]\r\n cut_points = MultiPoint(cut_points[1:-1])\r\n result = split(line, cut_points)\r\n\r\n if len(result)==len(trans_lines_all)-1:\r\n d['segment'] = [s for s in result]\r\n\r\n return d\r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id) \r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id)\r\n\r\n segments = [cut_shapes_(shape_trans_lines[i], shape_closest_points[i]) for i in range(0, len(shape_trans_lines))]\r\n\r\n # Remove None values\r\n segments = [i for i in segments if i] \r\n\r\n loops = pd.DataFrame()\r\n loops['route_id'] = loops_route_id\r\n loops['direction_id'] = loops_direction_id\r\n loops['shape_id'] = loops_shape_id\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------- CUT THE SHAPES WITH LOOPS --------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n\r\n # Manage the lines with loops\r\n shapes_loop = shapes.loc[shapes.shape_id.isin(loops_shape_id)]\r\n\r\n aux = pd.DataFrame.from_dict(shape_trans_lines)\r\n trans_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n aux = pd.DataFrame.from_dict(shape_closest_points)\r\n cut_points_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n # Separate the shapes according to possible exceptions\r\n trans_loop['n_segments'] = trans_loop['trans_lines'].map(len)\r\n run_shapes_no_middle = False\r\n run_shapes_one_seg = False\r\n\r\n # Exception 1: Only three stops --> one cut point, two segments\r\n # If there's only one cut_point this will make the\r\n # script skip the \"Middle segments\" part\r\n # (with only one cut point there are only two segments)\r\n\r\n shapes_no_middle = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] ==3, 'shape_id'].unique())].reset_index()\r\n\r\n if len(shapes_no_middle) > 0:\r\n run_shapes_no_middle = True\r\n\r\n # Exception 
2: Only two stops --> no cut points, one segments\r\n shapes_one_seg = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] ==2, 'shape_id'].unique())].reset_index()\r\n\r\n if len(shapes_one_seg) > 0 :\r\n run_shapes_one_seg = True\r\n\r\n # The rest of the shapes\r\n shapes_ok = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] >3, 'shape_id'].unique())].reset_index()\r\n\r\n def add_points(row, add_p, cut_points_gdf):\r\n # Calculate the min distance between the stops that intersect this segment\r\n index_track_ = row.name\r\n p = cut_points_gdf.loc[cut_points_gdf.index.isin(add_p.loc[add_p.index_track_==index_track_, 'index_cut'])]\r\n p.crs={'init':'epsg:4326'}\r\n\r\n seg = [LineString([p.geometry.values[i], p.geometry.values[i+1]]) for i in range(0,len(p)-1)]\r\n seg = gpd.GeoSeries(seg)\r\n seg.crs={'init':'epsg:4326'}\r\n dist = seg.to_crs(epsg).length.min() - 5\r\n\r\n\r\n gse = gpd.GeoSeries(row.geometry, index=[row.distance_m])\r\n gse.crs = {'init':'epsg:4326'}\r\n gse = gse.to_crs(epsg)\r\n\r\n length = gse.index[0]\r\n start = gse.values[0].coords[0]\r\n end = gse.values[0].coords[-1]\r\n\r\n num_vert = int(length/dist)\r\n\r\n new_points = [start] + [gse.values[0].interpolate(dist*n) for n in list(range(1, num_vert+1))] + [end]\r\n new_points = [Point(p) for p in new_points]\r\n new_line = LineString(new_points)\r\n\r\n check = gpd.GeoSeries([new_line])\r\n check.crs = {'init':'epsg:{}'.format(epsg)}\r\n check = check.to_crs(epsg=4326)\r\n return check[0]\r\n\r\n # Loop lines with more than three stops\r\n def cut_loops_shapes_ok(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n #d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id), stop_sequence=list(df.stop_sequence))\r\n\r\n # All the necessary information to split the line\r\n # 1- line to be cut\r\n # 2- transversal lines to cut\r\n # 3- closest point on the line\r\n\r\n line = shapes_ok.loc[shapes_ok.shape_id == shape_id, 'geometry'].values[0] \r\n cut_lines = trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n cut_gdf = gpd.GeoDataFrame(data=list(range(len(cut_lines))), geometry=cut_lines)\r\n cut_points_gdf = gpd.GeoDataFrame(data=list(range(len(cut_points))), geometry=cut_points)\r\n\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # Make sure the shapes has a point every 100m\r\n # Create a GeoDataFrame with two point segments of the shape and its distance in meters\r\n shape = line.coords\r\n # Create two point segments for the shape\r\n track_l = gpd.GeoSeries([LineString([shape[i], shape[i+1]]) for i in range(0, 
len(shape)-1)])\r\n track_l.crs={'init':'epsg:4326'}\r\n #Calculate the length of each two point segment in meters\r\n track_dist = track_l.to_crs(epsg=epsg).length\r\n # Create the dataframe\r\n track_l_gdf = gpd.GeoDataFrame(data=dict(distance_m = track_dist), geometry = track_l)\r\n\r\n # Check where stops are closer than points of the track\r\n # To do that we intersect each segment between two segments of the track with our cut lines\r\n how_many = gpd.sjoin(track_l_gdf, cut_gdf, how='left', op='intersects', lsuffix='left', rsuffix='right').reset_index()\r\n how_many.rename(columns=dict(index='index_track_', index_right = 'index_cut'), inplace=True)\r\n\r\n # The filter those that were intersected by more than one cut line\r\n how_manyp = how_many.pivot_table('geometry', index='index_track_', aggfunc='count').reset_index()\r\n how_manyp = how_manyp.loc[how_manyp.geometry>1]\r\n\r\n add_p = how_many.loc[how_many.index_track_.isin(how_manyp.index_track_.unique())]\r\n\r\n # Add intermediate points for segments with length > 100m\r\n track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique()), 'geometry'] = track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique())] .apply(lambda x: add_points(x, add_p, cut_points_gdf), axis=1)\r\n\r\n #track_l_gdf.loc[track_l_gdf.distance_m>dist, 'geometry'] = track_l_gdf.loc[track_l_gdf.distance_m>dist].apply(lambda x: add_points(x, dist), axis=1)\r\n\r\n # Take the points and create the LineString again\r\n t = [list(g.coords)[:-1] for g in track_l_gdf.geometry]\r\n flat_list = [item for sublist in t for item in sublist] + [track_l_gdf.geometry.tail(1).values[0].coords[-1]]\r\n\r\n line = LineString(flat_list) \r\n\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------------------------------------\r\n # First segment\r\n # We will use i to identify were the next segment should start\r\n for i in range(2, len(line.coords)):\r\n segment = LineString(line.coords[0:i])\r\n if segment.intersects(cut_lines[0]):\r\n points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n #last_point = i\r\n last_point = i-1\r\n d['segment'] = [segment]\r\n #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n break\r\n\r\n # Middle segments\r\n for l in range(1, len(cut_lines)):\r\n nearest_point = list(cut_points[l-1].coords) # segments always start in the one of the cut points\r\n start_iterator = last_point + 1 # start from the last point found in the previous segment\r\n\r\n for i in range(start_iterator, len(line.coords)+1):\r\n points_to_stop = nearest_point + line.coords[last_point:i] # keep adding points to extend the line\r\n segment = LineString(points_to_stop)\r\n\r\n if segment.intersects(cut_lines[l]): \r\n # if the line intersects with the cut line, define the segment\r\n # the segment goes from one cut point to the next one\r\n points_to_stop = nearest_point + line.coords[last_point:i-1] + list(cut_points[l].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n last_point = i-1\r\n d['segment'] = d['segment'] + [segment]\r\n break \r\n\r\n if 
i==(len(line.coords)):\r\n points_to_stop = nearest_point + list(cut_points[l].coords)\r\n segment = LineString(points_to_stop)\r\n d['segment'] = d['segment'] + [segment]\r\n\r\n # Last segment\r\n # We start at the last cut point and go all the way to the end\r\n nearest_point = list(cut_points[l].coords)\r\n points_to_stop = nearest_point + line.coords[last_point:len(line.coords)]\r\n segment = LineString(points_to_stop)\r\n\r\n d['segment'] = d['segment'] + [segment] \r\n\r\n return d\r\n\r\n segments1 = [cut_loops_shapes_ok(s) for s in shapes_ok.shape_id.unique()]\r\n # Remove None values\r\n segments1 = [i for i in segments1 if i] \r\n segments.extend(segments1)\r\n\r\n # Exception 1: Only three stops --> one cut point, two segments\r\n # If there's only one cut_point this will make the\r\n # script skip the \"Middle segments\" part\r\n # (with only one cut point there are only two segments)\r\n\r\n if run_shapes_no_middle:\r\n #for index, row in shapes_no_middle.iterrows():\r\n def cut_shapes_no_middle(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n d = dict(shape_id = shape_id, route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n # All the necessary information to split the line\r\n # 1- line to be cut\r\n # 2- transversal lines to cut\r\n # 3- closest point on the line\r\n\r\n line = shapes_no_middle.loc[shapes_no_middle.shape_id == shape_id, 'geometry'].values[0] \r\n cut_lines = trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n # First segment\r\n # We will use i to identify were the next segment should start\r\n for i in range(2, len(line.coords)):\r\n segment = LineString(line.coords[0:i])\r\n\r\n if segment.intersects(cut_lines[0]):\r\n points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n segment = LineString(points_to_stop)\r\n\r\n # Save the position of the point that makes it to the intersection\r\n last_point = i\r\n d['segment'] = [segment]\r\n #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n break\r\n\r\n # Last segment\r\n # We start at the last cut point and go all the way to the end\r\n nearest_point = list(cut_points[0].coords)\r\n points_to_stop = nearest_point + line.coords[last_point-1:len(line.coords)]\r\n segment = LineString(points_to_stop)\r\n\r\n d['segment'] = d['segment'] + [segment]\r\n\r\n return d\r\n\r\n # Apply the function\r\n segments2 = [cut_shapes_no_middle(s) for s in shapes_no_middle.shape_id.unique()]\r\n # Remove None values\r\n segments2 = [i for i in segments2 if i] \r\n segments.extend(segments2)\r\n\r\n # Exception 2: Only two stops --> no cut points, one segments\r\n if run_shapes_one_seg:\r\n #for index, row in shapes_one_seg.iterrows():\r\n def cut_shapes_one_seg(shape_id):\r\n # Set the ids\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & 
(sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n #df['segment'] = ''\r\n d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n line = shapes_one_seg.loc[shapes_one_seg.shape_id == shape_id, 'geometry'].values[0] \r\n d['segment'] = [line]\r\n return d\r\n\r\n # Apply function\r\n segments3 = [cut_shapes_one_seg(s) for s in shapes_one_seg.shape_id.unique()]\r\n # Remove None values\r\n segments3 = [i for i in segments3 if i] \r\n segments.extend(segments3)\r\n\r\n\r\n def format_shapes(s, last_id):\r\n df = pd.DataFrame()\r\n df['stop_sequence'] = s['stop_sequence']\r\n df['start_stop_id'] = s['stop_id']\r\n df['end_stop_id'] = s['stop_id'][1:] + [last_id]\r\n df['shape_id'] = s['shape_id']\r\n df['route_id'] = s['route_id']\r\n df['direction_id'] = s['direction_id']\r\n\r\n df['geometry'] = s['segment']\r\n\r\n return df\r\n\r\n df = pd.concat([format_shapes(s, sseq.loc[sseq.shape_id==s['shape_id']].tail(1).stop_id.values[0]) for s in segments])\r\n\r\n df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='start_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n df.rename(columns=dict(stop_name='start_stop_name'), inplace=True)\r\n df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='end_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n df.rename(columns=dict(stop_name='end_stop_name'), inplace=True)\r\n df['segment_id'] = df.start_stop_id + '-' + df.end_stop_id\r\n\r\n segments_gdf = gpd.GeoDataFrame(data = df.loc[:,['route_id','direction_id','stop_sequence','start_stop_name', 'end_stop_name', 'start_stop_id', 'end_stop_id','segment_id','shape_id']], geometry = df.geometry)\r\n\r\n segments_gdf.crs = {'init':'epsg:4326'}\r\n segments_gdf['distance_m'] = segments_gdf.geometry.to_crs(epsg=epsg).length\r\n\r\n return segments_gdf\r\n \r\ndef speeds_from_gtfs(routes, stop_times, segments_gdf, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import pandas as pd\r\n import math\r\n import os\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n routes = routes\r\n stop_times = stop_times\r\n \r\n # Get the runtime between stops\r\n stop_times.sort_values(by = ['trip_id', 'stop_sequence'], ascending = True, inplace=True)\r\n \r\n first_try = stop_times.loc[:,['trip_id', 'arrival_time']]\r\n first_try['trip_id_next'] = first_try['trip_id'].shift(-1)\r\n first_try['arrival_time_next'] = first_try['arrival_time'].shift(-1)\r\n \r\n def runtime(row):\r\n if row.trip_id == row.trip_id_next:\r\n runtime = (row.arrival_time_next - row.arrival_time)/3600\r\n else:\r\n runtime = 0\r\n \r\n return runtime\r\n \r\n first_try['runtime_h'] = first_try.apply(runtime, axis=1)\r\n \r\n if len(first_try) == len(stop_times):\r\n stop_times['runtime_h'] = first_try['runtime_h']\r\n \r\n stop_times.head(2)\r\n # Merge stop_times with segments_gdf to get the distance\r\n segments_gdf['direction_id'] = segments_gdf['direction_id'].map(int)\r\n segments_gdf['stop_sequence'] = segments_gdf['stop_sequence'].map(int)\r\n \r\n speeds = pd.merge(stop_times, segments_gdf[['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'segment_id','shape_id', 'distance_m']], \r\n left_on = ['route_id', 'direction_id', 'stop_id', 'stop_sequence', 'shape_id'], \r\n right_on = 
['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'shape_id'],\r\n how = 'left').drop('start_stop_id', axis=1)\r\n \r\n speeds = speeds.loc[~speeds.distance_m.isnull(),\r\n ['trip_id', 'route_id', 'direction_id', 'shape_id', 'segment_id',\r\n 'arrival_time', 'departure_time', 'stop_id','stop_name',\r\n 'stop_sequence', 'runtime_h', 'distance_m','geometry']\r\n ]\r\n \r\n # Assign a time window to each row\r\n if max(cutoffs)<=24: \r\n speeds_ok = speeds.loc[speeds.departure_time < 24*3600]\r\n speeds_fix = speeds.loc[speeds.departure_time >= 24*3600]\r\n speeds_fix['departure_time'] = [d - 24*3600 for d in speeds_fix.departure_time]\r\n \r\n speeds = speeds_ok.append(speeds_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n speeds['departure_time'] = speeds['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n speeds['window'] = pd.cut(speeds['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n speeds = speeds.loc[~speeds.window.isnull()]\r\n speeds['window'] = speeds['window'].astype(str)\r\n \r\n # Calculate the speed\r\n speeds.loc[speeds.runtime_h == 0.0, 'runtime_h'] = speeds.loc[speeds.runtime_h != 0.0, 'runtime_h'].mean()\r\n speeds['speed'] = round(speeds['distance_m']/1000/speeds['runtime_h'])\r\n speeds = speeds.loc[~speeds.speed.isnull()]\r\n \r\n # Calculate average speed to modify outliers\r\n avg_speed_route = speeds.pivot_table('speed',\r\n index=['route_id', 'direction_id','window'],\r\n aggfunc='mean').reset_index()\r\n avg_speed_route.rename(columns={'speed':'avg_speed_route'}, inplace=True)\r\n # Assign average speed to outliers\r\n speeds = pd.merge(speeds, avg_speed_route, how='left')\r\n speeds.loc[speeds.speed>120,'speed'] = speeds.loc[speeds.speed>120,'avg_speed_route']\r\n \r\n # Calculate max speed per segment to have a free_flow reference\r\n max_speed_segment = speeds.pivot_table('speed',\r\n index = ['stop_id', 'direction_id'],\r\n aggfunc='max')\r\n max_speed_segment.rename(columns={'speed':'max_kmh'}, inplace=True)\r\n \r\n \r\n # Get the average per route, direction, segment and time of day\r\n speeds_agg = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n index=['route_id', 'direction_id', 'segment_id', 'window'],\r\n aggfunc = 'mean'\r\n ).reset_index()\r\n speeds_agg['route_id'] = speeds_agg['route_id'].map(str)\r\n speeds_agg['direction_id'] = speeds_agg['direction_id'].map(int)\r\n \r\n data = pd.merge(speeds_agg, segments_gdf, \r\n left_on=['route_id', 'direction_id', 'segment_id'],\r\n right_on = ['route_id', 'direction_id', 'segment_id'],\r\n how='left').reset_index().sort_values(by = ['route_id', 'direction_id','window','stop_sequence',], ascending=True)\r\n \r\n data.drop(['index'], axis=1, inplace=True)\r\n \r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = 
routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n data = pd.merge(data, routes[['route_id', 'route_name']], left_on='route_id', right_on='route_id', how='left')\r\n \r\n # Get the average per segment and time of day\r\n # Then add it to the rest of the data\r\n \r\n all_lines = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n index=['segment_id', 'window'],\r\n aggfunc = 'mean'\r\n ).reset_index()\r\n \r\n data_all_lines = pd.merge(\r\n all_lines, \r\n segments_gdf.drop_duplicates(subset=['segment_id']), \r\n left_on=['segment_id'],\r\n right_on = ['segment_id'],\r\n how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n \r\n data_all_lines.drop(['index'], axis=1, inplace=True)\r\n data_all_lines['route_id'] = 'ALL_LINES'\r\n data_all_lines['route_name'] = 'All lines'\r\n data_all_lines['direction_id'] = 'NA'\r\n data_complete = data.append(data_all_lines)\r\n \r\n data_complete1 = data_complete.loc[~data_complete.route_name.isnull(), :].reset_index()\r\n \r\n \r\n # Get the columns in the right format\r\n int_columns = ['speed']\r\n \r\n for c in int_columns:\r\n data_complete1[c] = data_complete1[c].apply(lambda x: round(x,1))\r\n \r\n \r\n data_complete1 = data_complete1.loc[:,['route_id', 'route_name','direction_id','segment_id', 'window',\r\n 'speed', \r\n 'start_stop_id', 'start_stop_name', 'end_stop_id','end_stop_name', \r\n 'distance_m','stop_sequence', 'shape_id', 'runtime_h','geometry', ]] \r\n \r\n data_complete1.columns = ['route_id', 'route_name','dir_id', 'segment_id','window', \r\n 'speed',\r\n 's_st_id', 's_st_name', 'e_st_id','e_st_name',\r\n 'distance_m', 'stop_seq', 'shape_id','runtime_h', 'geometry']\r\n \r\n # Assign max speeds to each segment\r\n data_complete1 = pd.merge(data_complete1, max_speed_segment,\r\n left_on=['s_st_id', 'dir_id'], right_on = ['stop_id', 'direction_id'],\r\n how='left')\r\n \r\n gdf = gpd.GeoDataFrame(data = data_complete1.drop('geometry', axis=1), geometry=data_complete1.geometry)\r\n \r\n gdf.loc[gdf.dir_id==0,'dir_id'] = 'Inbound'\r\n gdf.loc[gdf.dir_id==1,'dir_id'] = 'Outbound'\r\n \r\n gdf.rename(columns={'speed': 'speed_kmh'}, inplace=True)\r\n gdf['speed_mph'] = gdf['speed_kmh']*0.621371\r\n gdf['max_mph'] = gdf['max_kmh']*0.621371\r\n \r\n gdf = gdf.drop(['shape_id'], axis=1).drop_duplicates()\r\n \r\n return gdf\r\n \r\ndef create_json(gdf, variable, filename,\r\n variable_label,\r\n filter_variables = [],\r\n filter_labels = [],\r\n colors = [],\r\n sizes = ['medium', 'medium', 'medium','medium','large','large'],\r\n breaks = [],\r\n default_values = [],\r\n symbol_layer = False,\r\n categories = ['Healthcare', 'Education', 'Food', 'Financial', 'Entertainment', 'Transportation', 'Others'], \r\n symbols = ['Hospital', 'School','Default', 'Official', 'Special', 'BusStop', 'Default'], \r\n ):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n \r\n import os\r\n import json\r\n import pandas as pd\r\n\r\n try:\r\n import utm\r\n except ImportError as e:\r\n os.system('pip install utm')\r\n import utm\r\n\r\n try:\r\n import jenkspy\r\n except ImportError as e:\r\n os.system('pip install jenkspy')\r\n import jenkspy\r\n if symbol_layer:\r\n # All categorical variable layer thing\r\n # We start with Remix Lightrail colors and then add default colors from Plotly\r\n # qualitative_palette 
= [blue, red, green, yellow, purple, aqua, pink, peach, melon]\r\n if colors == []:\r\n import plotly.express as px\r\n colors = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', '#476800'] + px.colors.qualitative.Light24\r\n fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = colors[0:len(gdf[variable].unique())]))\r\n gdf = pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n\r\n d = dict(\r\n category = categories,\r\n symbol = symbols\r\n )\r\n\r\n category_symbols = pd.DataFrame(d)\r\n\r\n gdf = pd.merge(gdf, category_symbols, how='left')\r\n\r\n var_symbol_color = gdf.pivot_table('id', index=[variable ,'symbol', 'fill_color'], aggfunc='count').reset_index()\r\n var_symbol_color['symbol_color'] = var_symbol_color.apply(lambda x: '{}{}'.format(x.symbol, x.fill_color), axis=1)\r\n\r\n symbols = []\r\n\r\n for v in gdf.variable.unique():\r\n aux = dict(\r\n input = v,\r\n value = var_symbol_color.loc[var_symbol_color[variable]==v,'symbol_color'].values[0]\r\n )\r\n symbols = symbols + [aux]\r\n\r\n icon = dict(\r\n type = 'categorical',\r\n values = symbols, # list of dict with values\r\n dataCol = variable, # could be amenity, group or catefory for example\r\n defaultValue = \"Default#000\"\r\n )\r\n\r\n label = dict(\r\n type = 'data-column',\r\n dataCol = 'name'\r\n )\r\n\r\n t = dict(\r\n type = 'symbol',\r\n icon = icon,\r\n label = label,\r\n configVersion = 1\r\n )\r\n else:\r\n # All line and circle numerical variable layers thing\r\n if colors == []:\r\n colors = [\"#D83D25\",\"#EF6933\",\"#F89041\",\"#fee090\",\"#91bfdb\",\"#4575b4\"],\r\n\r\n gdf[variable] = gdf[variable].map(int)\r\n \r\n if 'window' in list(gdf.columns):\r\n sort_windows=pd.DataFrame()\r\n sort_windows['window'] = gdf.window.unique()\r\n sort_windows['sort'] = [i.split(':')[0] for i in gdf.window.unique()]\r\n sort_windows['sort'] = sort_windows['sort'].astype(int)\r\n sort_windows.sort_values(by='sort', ascending=True, inplace=True)\r\n sort_windows.reset_index(inplace=True)\r\n \r\n # Calculate breaks the variable\r\n if breaks ==[]:\r\n breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n breaks = [int(b) for b in breaks]\r\n max_value = int(gdf[variable].max())\r\n bl = [int(b) for b in breaks]\r\n \r\n # Colors \r\n stops_color = []\r\n for i in range(len(colors)):\r\n aux = dict(input = bl[i], output = colors[i])\r\n stops_color = stops_color + [aux]\r\n \r\n color = dict(\r\n type='range',\r\n stops = stops_color,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n \r\n # Sizes\r\n stops_size = []\r\n for i in range(len(colors)):\r\n aux = dict(input = bl[i], output = sizes[i])\r\n stops_size = stops_size + [aux]\r\n \r\n if gdf.geom_type[0] == 'Point':\r\n radius = dict(\r\n type='range',\r\n stops = stops_size,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n gtype = 'circle'\r\n elif gdf.geom_type[0] == 'LineString':\r\n width = dict(\r\n type='range',\r\n stops = stops_size,\r\n dataCol = variable,\r\n maxInput = max_value\r\n )\r\n gtype = 'line'\r\n else:\r\n print(\"Check the geometry, it is not recognized as a LineString nor a Point\")\r\n \r\n # Legend labels\r\n filter_variables1 = [variable] + filter_variables\r\n filter_labels1 = [variable_label] + filter_labels\r\n \r\n legendLabels = dict(\r\n dataColLabels = {filter_variables1[i]: filter_labels1[i] for i in range(len(filter_variables1))}\r\n )\r\n \r\n # Filterable columns\r\n filterableColumns = []\r\n for f in 
filter_variables:\r\n if (f == 'route_name') & ('All lines' in list(gdf[f].unique())):\r\n aux = dict(\r\n values= ['All lines'] + list(gdf.loc[gdf.route_id!='ALL_LINES'].route_name.sort_values(ascending=True).unique()),\r\n dataCol = 'route_name',\r\n defaultValue = 'All lines'\r\n )\r\n elif (f != 'window')&(f != 'day_type'):\r\n if default_values[filter_variables.index(f)] == True:\r\n aux = dict(\r\n values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n dataCol = f,\r\n defaultValue = str(list(gdf[f].sort_values(ascending=True).unique())[0])\r\n )\r\n else:\r\n aux = dict(\r\n values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n dataCol = f\r\n )\r\n elif f == 'window':\r\n if len(sort_windows.window.unique())> 1:\r\n default_val = list(sort_windows.window.unique())[1]\r\n else:\r\n default_val = list(sort_windows.window.unique())[0]\r\n aux = dict(\r\n values = list(sort_windows.window.unique()),\r\n dataCol = 'window',\r\n defaultValue = default_val\r\n )\r\n elif f == 'day_type':\r\n aux = dict(\r\n values = ['Weekday', 'Saturday', 'Sunday'],\r\n dataCol = 'day_type',\r\n defaultValue = 'Weekday'\r\n )\r\n filterableColumns = filterableColumns + [aux]\r\n \r\n # Save the json file\r\n if gtype == 'circle':\r\n t = dict(\r\n type=gtype,\r\n color=color,\r\n radius=radius,\r\n legendLabels=legendLabels,\r\n configVersion= 1,\r\n filterableColumns=filterableColumns\r\n )\r\n elif gtype == 'line':\r\n t = dict(\r\n type=gtype,\r\n color=color,\r\n width=width,\r\n legendLabels=legendLabels,\r\n configVersion= 1,\r\n filterableColumns=filterableColumns\r\n )\r\n json_name = 'json_' + filename + '.json'\r\n with open(json_name, 'w') as outfile:\r\n json.dump(t, outfile)\r\n\r\ndef stops_freq(stop_times, stops, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n \r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n \r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = stop_times.loc[~stop_times.window.isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n 
stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n \r\n trips_per_window = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n \r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n \r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n \r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n stop_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, max_freq, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, stops.loc[:, ['stop_id', 'stop_name', 'geometry']], how='left')\r\n stop_frequencies = gpd.GeoDataFrame(data=stop_frequencies.drop('geometry', axis=1), geometry=stop_frequencies.geometry)\r\n \r\n stop_frequencies.loc[stop_frequencies.direction_id == 0, 'direction_id'] = 'Inbound'\r\n stop_frequencies.loc[stop_frequencies.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n stop_frequencies.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq'\r\n }, inplace=True)\r\n stop_frequencies.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return stop_frequencies\r\n \r\ndef map_gdf(gdf, variable,\r\n colors = [\"#d13870\", \"#e895b3\" ,'#55d992', '#3ab071', '#0e8955','#066a40'],\r\n tooltip_var = [],\r\n tooltip_labels = [],\r\n breaks = []):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import branca\r\n import pandas as pd\r\n import os\r\n import plotly.express as px\r\n try:\r\n import jenkspy\r\n except ImportError as e:\r\n os.system('pip install jenkspy')\r\n import jenkspy\r\n \r\n try:\r\n import folium\r\n except ImportError as e:\r\n os.system('pip install folium')\r\n import folium\r\n\r\n # Look for the center of the map\r\n minx, miny, maxx, maxy = gdf.geometry.total_bounds\r\n \r\n centroid_lat = miny + (maxy - miny)/2\r\n centroid_lon = minx + (maxx - minx)/2 \r\n \r\n if isinstance(gdf[variable].values[0], str):\r\n categorical = True\r\n else: \r\n categorical = False\r\n \r\n # Calculate the breaks if they were not specified\r\n if (breaks == []) & (not categorical):\r\n breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n breaks = [int(b) for b in breaks]\r\n \r\n m = folium.Map(location=[centroid_lat, centroid_lon], \r\n tiles='cartodbpositron', zoom_start=12\r\n )\r\n # If the variable is categorical\r\n if categorical:\r\n gdf['radius'] = 5\r\n # qualitative_palette = [blue, red, green, yellow, purple, aqua, pink, peach,melon]\r\n # We start with Remix Lightrail colors and then add default colors from Plotly\r\n qualitative_palette = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', 
'#476800']\r\n color_palette = qualitative_palette + px.colors.qualitative.Pastel + px.colors.qualitative.Prism + px.colors.qualitative.Vivid + px.colors.qualitative.Light24\r\n fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = color_palette[0:len(gdf[variable].unique())])) \r\n gdf=pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n # If the variable is numerical\r\n else:\r\n gdf['radius'] = gdf[variable]\r\n index = [int(b) for b in breaks]\r\n colorscale = branca.colormap.StepColormap(colors, index = index, caption=variable)\r\n gdf['fill_color'] = gdf[variable].apply(lambda x: colorscale(x)) \r\n \r\n if gdf.geom_type.values[0] == 'Point':\r\n # my code for circles\r\n # Create the circles\r\n for i in range(int(len(gdf))):\r\n folium.CircleMarker(\r\n location=[gdf.loc[i, 'geometry'].y, gdf.loc[i, 'geometry'].x], \r\n radius = float(gdf.loc[i, 'radius']),\r\n #popup=geo_data.loc[i, 'stop_name'], \r\n tooltip = tooltip_labels[0] + str(gdf.loc[i, tooltip_var[0]]), \r\n color='#ffffff00',\r\n fill = True,\r\n fill_opacity = .7,\r\n fill_color = str(gdf.loc[i, 'fill_color'])\r\n ).add_to(m)\r\n else:\r\n # Styling function for LineStrings \r\n def style_function(feature):\r\n return {\r\n 'fillOpacity': 0.5,\r\n 'weight': 3,#math.log2(feature['properties']['speed'])*2,\r\n 'color': feature['properties']['fill_color']\r\n }\r\n # my code for lines\r\n geo_data = gdf.__geo_interface__\r\n folium.GeoJson(\r\n geo_data, \r\n style_function = style_function,\r\n tooltip = folium.features.GeoJsonTooltip(fields=tooltip_var,\r\n aliases = tooltip_labels,\r\n labels=True,\r\n sticky=False)\r\n ).add_to(m)\r\n \r\n return m\r\n\r\ndef lines_freq(stop_times, trips, shapes, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n # Generate the hours of the day\r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n \r\n # Generate the time windows and cutoffs\r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n \r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n \r\n # Generate the labels\r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n \r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n \r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = 
stop_times.loc[~stop_times.window.isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n \r\n stop_times_first = stop_times.loc[stop_times.stop_sequence==1,:]\r\n \r\n # Count number of trips per windows and hour\r\n trips_per_window = stop_times_first.pivot_table('trip_id', index=['route_id','direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times_first.pivot_table('trip_id', index=['route_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n \r\n # Calculate the hourly frequency\r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n \r\n # Get max number of trips and highest frequency\r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id', 'direction_id'], aggfunc='min').reset_index()\r\n \r\n # Calculate frequency per window for each route\r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n \r\n aux = trips.loc[trips.service_id=='1',['route_id', 'direction_id', 'shape_id']].drop_duplicates()\r\n aux = pd.merge(line_frequencies, aux, how='left')\r\n line_frequencies_gdf = pd.merge(aux, shapes, how='left')\r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n\r\n line_frequencies_gdf = pd.merge(line_frequencies_gdf, routes[['route_id', 'route_name']])\r\n \r\n gdf = gpd.GeoDataFrame(data=line_frequencies_gdf.drop('geometry', axis=1), geometry=line_frequencies_gdf.geometry)\r\n \r\n gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n \r\n gdf.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq',\r\n }, inplace=True)\r\n \r\n gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'window',\r\n 'frequency', 'ntrips',\r\n 'max_freq', 'max_trips', 'geometry']]\r\n gdf = gdf.loc[~gdf.geometry.isnull()]\r\n gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return gdf\r\n \r\ndef segments_freq(segments_gdf, stop_times, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n # Generate the hours of the day\r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n\r\n 
# Generate the time windows and cutoffs\r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n\r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n else:\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n if w > 24:\r\n w1 = w-24\r\n l = str(w1) + ':00'\r\n else:\r\n l = str(w) + ':00'\r\n labels = labels + [l]\r\n else:\r\n if w > 24:\r\n w1 = w-24\r\n n = math.modf(w1)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n else:\r\n n = math.modf(w)\r\n l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + [l]\r\n\r\n # Generate the labels\r\n labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n\r\n stop_times['departure_time'] = stop_times['departure_time']/3600\r\n\r\n # Put each trips in the right window\r\n stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n stop_times = stop_times.loc[~stop_times['window'].isnull()]\r\n stop_times['window'] = stop_times['window'].astype(str)\r\n\r\n stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n stop_times['hour'] = stop_times['hour'].astype(str)\r\n\r\n # Count number of trips per windows and hour\r\n\r\n trips_per_window = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n trips_per_hour = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n\r\n # Calculate the hourly frequency\r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n\r\n # Get max number of trips and highest frequency\r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id','stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id','stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n\r\n\r\n # Calculate frequency per window for each route\r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n\r\n line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, \r\n segments_gdf.loc[:, ['route_id', 'segment_id', 'start_stop_id', 'start_stop_name', 'end_stop_name','direction_id', 'geometry']],\r\n left_on=['route_id','stop_id', 'direction_id'],\r\n right_on=['route_id','start_stop_id', 'direction_id'], \r\n how='left')\r\n\r\n line_frequencies.drop_duplicates(subset=['route_id', 'stop_id', 'direction_id', 'window', 'ntrips', 'frequency',\r\n 'max_trips', 'max_frequency', 'segment_id', 
'start_stop_id',\r\n 'start_stop_name', 'end_stop_name'], inplace=True)\r\n\r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n \r\n line_frequencies = pd.merge(line_frequencies, routes.loc[:,['route_id','route_name']],how='left')\r\n\r\n # Calculate sum of trips per segment with all lines\r\n all_lines = line_frequencies.pivot_table(['ntrips'],\r\n index=['segment_id', 'window'],\r\n aggfunc = 'sum'\r\n ).reset_index()\r\n\r\n # Calculate frequency per window for all routes\r\n start_time = all_lines['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n end_time = all_lines['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n all_lines['frequency'] = ((end_time - start_time)*60 / all_lines.ntrips).astype(int)\r\n\r\n # Get max number of trips and highest frequency per segment for all routes\r\n max_trips_all_lines = all_lines.pivot_table('ntrips', index=['segment_id'], aggfunc='max').reset_index()\r\n max_freq_all_lines = all_lines.pivot_table('frequency', index=['segment_id'], aggfunc='min').reset_index()\r\n\r\n max_trips_all_lines.rename(columns=dict(ntrips='max_trips'), inplace=True)\r\n max_freq_all_lines.rename(columns=dict(frequency='max_frequency'), inplace=True)\r\n\r\n all_lines = pd.merge(all_lines, max_trips_all_lines, how = 'left')\r\n all_lines = pd.merge(all_lines, max_freq_all_lines, how = 'left')\r\n\r\n data_all_lines = pd.merge(\r\n all_lines, \r\n segments_gdf.drop_duplicates(subset=['segment_id']), \r\n left_on=['segment_id'],\r\n right_on = ['segment_id'],\r\n how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n\r\n data_all_lines.drop(['index'], axis=1, inplace=True)\r\n data_all_lines['route_id'] = 'ALL_LINES'\r\n data_all_lines['route_name'] = 'All lines'\r\n data_all_lines['direction_id'] = 'NA'\r\n data_complete = line_frequencies.append(data_all_lines).reset_index()\r\n\r\n gdf = gpd.GeoDataFrame(data=data_complete.drop('geometry', axis=1), geometry=data_complete.geometry)\r\n\r\n gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n\r\n\r\n gdf.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq',\r\n 'start_stop_name': 's_st_name',\r\n 'end_stop_name': 'e_st_name',\r\n 'start_stop_id':'s_st_id'\r\n }, inplace=True)\r\n\r\n gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'segment_id', 'window',\r\n 'frequency', 'ntrips', 's_st_id', 's_st_name', 'e_st_name',\r\n 'max_freq', 'max_trips', 'geometry']]\r\n gdf = gdf.loc[~gdf.geometry.isnull()]\r\n gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n\r\n return gdf\r\n \r\ndef download_osm(gdf):\r\n # Define the bounding box to query\r\n bounds = gdf.geometry.total_bounds\r\n\r\n # Build the query for overspass-api\r\n overpass_url = \"http://overpass-api.de/api/interpreter\"\r\n# overpass_query = \"\"\"\r\n# [out:json];\r\n# (way[\"highway\"~\"motorway|trunk|primary|secondary|tertiary|unclassified|residential|service|living_street\"]\r\n# [\"access\"!~\"private|no\"]\r\n# ({0}, {1}, {2}, {3}););\r\n# out geom;\r\n# \"\"\".format(bounds[1], bounds[0], bounds[3], 
bounds[2])\r\n\r\n overpass_query = \"\"\"\r\n [out:json];\r\n (way[\"highway\"~\"motorway|trunk|primary|secondary|tertiary|unclassified|residential|service|living_street\"]\r\n ({0}, {1}, {2}, {3}););\r\n out geom;\r\n \"\"\".format(bounds[1], bounds[0], bounds[3], bounds[2])\r\n\r\n # Query overpass-api\r\n response = requests.get(overpass_url, \r\n params={'data': overpass_query})\r\n\r\n # Put the response in a DataFrame\r\n data = response.json()\r\n ways_df = pd.DataFrame(data['elements'])\r\n\r\n # Parse the content in lists\r\n node_ids = []\r\n lat_lon = []\r\n way_ids = []\r\n oneway = []\r\n segment_seq = []\r\n\r\n n_nodes = [len(n) for n in list(ways_df.nodes)]\r\n\r\n [node_ids.extend(n) for n in list(ways_df.nodes)]\r\n [lat_lon.extend(g) for g in list(ways_df.geometry)]\r\n [way_ids.extend([ways_df.loc[i, 'id']]*n_nodes[i]) for i in range(0, len(ways_df))] \r\n [oneway.extend([ways_df.loc[i, 'tags'].get('oneway', '0')]*n_nodes[i]) for i in range(0, len(ways_df))]\r\n [segment_seq.extend(list(range(1, n_nodes[i]+1))) for i in range(0, len(ways_df))] # segment sequence for that way_id\r\n\r\n # Convert to int to save memory\r\n oneway = [1 if s=='yes' else s for s in oneway] \r\n oneway = [0 if s in ['no', '0', 'reversible', '-1'] else s for s in oneway] \r\n oneway = list(map(int, oneway))\r\n\r\n # ------------------------------------------------------------------------------------\r\n # ------------------------------ NODES -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Parse the json into a dataframe\r\n nodes = pd.DataFrame()\r\n nodes['way_id'] = way_ids\r\n nodes['node_id'] = node_ids\r\n nodes['oneway'] = oneway\r\n nodes['segment_seq'] = segment_seq\r\n\r\n # Get lat,lon values right\r\n lat = [p['lat'] for p in lat_lon]\r\n lon = [p['lon'] for p in lat_lon]\r\n\r\n # Create points\r\n points = [Point(lon[i], lat[i]) for i in range(0, len(lat))]\r\n\r\n # Create GeoDataFrame\r\n nodes_gdf = gpd.GeoDataFrame(data=nodes, geometry = points)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # --------------------------- SEGMENTS -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Define our lists\r\n # Does the node has the same way_id as the next node?\r\n bool_list = nodes['way_id'] == nodes['way_id'].shift(-1)\r\n # Nodes of the segment\r\n segment_nodes = ['{0} - {1}'.format(str(node_ids[i]), str(node_ids[i+1])) for i in range(0,len(node_ids)-1)]\r\n segment_ids = list(range(1, len(segment_nodes)+1))\r\n points_next = points[1:] + [None]\r\n\r\n # Remove the last node of the segment (it is already in the last segment)\r\n segment_nodes = list(compress(segment_nodes, bool_list)) \r\n segment_ids = list(compress(segment_ids, bool_list)) \r\n points = list(compress(points, bool_list)) \r\n points_next = list(compress(points_next, bool_list)) \r\n geometry = [LineString([points[i], points_next[i]]) for i in range(0,len(segment_nodes))]\r\n\r\n # Keep the segments and create the geo data frame\r\n segments = nodes.loc[bool_list, ['way_id', 'oneway', 'segment_seq']]\r\n segments['segment_nodes'] = segment_nodes\r\n segments['osm_segment_id'] = segment_ids\r\n segments_gdf = gpd.GeoDataFrame(data=segments, geometry = geometry)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # 
--------------------------- ADD OPPOSITE SEGMENTS ----------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Create the opposite segments for two way streets\r\n opposite = segments_gdf.loc[segments_gdf.oneway == 0].reset_index()\r\n\r\n opp_nodes = ['{0} - {1}'.format(opposite.loc[i,'segment_nodes'].split(' - ')[1], opposite.loc[i,'segment_nodes'].split(' - ')[0]) for i in range(0,len(opposite))]\r\n opp_way_id = list(opposite.loc[:,'way_id'])\r\n opp_osm_segment_id = list(range(segments_gdf.osm_segment_id.max()+1, segments_gdf.osm_segment_id.max() + len(opposite) + 1))\r\n\r\n opp_geom = opposite.geometry.apply(lambda x: LineString([x.coords[1], x.coords[0]]))\r\n\r\n opp_df = pd.DataFrame()\r\n opp_df['way_id'] = opp_way_id\r\n opp_df['segment_nodes'] = opp_nodes\r\n opp_df['oneway'] = 0\r\n opp_df['osm_segment_id'] = opp_osm_segment_id\r\n opp_df['segment_seq'] = 0\r\n\r\n opp_gdf = gpd.GeoDataFrame(data=opp_df, geometry=opp_geom)\r\n\r\n segments_gdf = segments_gdf.append(opp_gdf)\r\n\r\n # Add \"from\" and \"to\" columns to make the graph generation easier\r\n segments_gdf['from'] = [int(s.split(' - ')[0]) for s in segments_gdf['segment_nodes']]\r\n segments_gdf['to'] = [int(s.split(' - ')[1]) for s in segments_gdf['segment_nodes']]\r\n \r\n return nodes_gdf, segments_gdf\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.DataFrame",
"pandas.cut",
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
merepbj/web-scraping-challenge
|
[
"cf4c401cad78b68af9fe508225ceb48bba99ba83"
] |
[
"Missions_to-Mars/scrape_mars.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nfrom splinter import Browser\nimport time\nbrowser = Browser('chrome','chromedriver')\n\ndef scrape(): \n title, paragraph = mars_news(browser)\n \n data = {\n \"news_title\": title, \n \"news_paragraph\": paragraph,\n \"news_image\": mars_images(browser),\n \"news_facts\": mars_facts(),\n \"news_hemisphere\": mars_hemisphere(browser)\n }\n return data\n\n\n# In[2]:\n\n\n# ### NASA Mars News\n# \n# * Scrape the [NASA Mars News Site](https://mars.nasa.gov/news/) and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.\n\n# In[3]:\n\ndef mars_news(browser): \n\n browser.visit('https://mars.nasa.gov/news/')\n title = browser.find_by_css('div.content_title a').text\n paragraph = browser.find_by_css('div.article_teaser_body').text\n return title, paragraph\n \n\n# ### JPL Mars Space Images - Featured Image\n# \n# * Visit the url for JPL Featured Space Image [here](https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html).\n# \n# * Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called `featured_image_url`.\n# \n# * Make sure to find the image url to the full size `.jpg` image.\n# \n# * Make sure to save a complete url string for this image.\n\n# In[4]:\n\ndef mars_images(browser):\n\n browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')\n browser.links.find_by_partial_text('FULL IMAGE').click()\n image = browser.find_by_css('img.fancybox-image')['src']\n return image\n\n# ### Mars Facts\n# \n# * Visit the Mars Facts webpage [here](https://space-facts.com/mars/) and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n# \n# * Use Pandas to convert the data to a HTML table string.\n\n# In[5]:\n\ndef mars_facts():\n return pd.read_html('https://space-facts.com/mars/')[0].to_html(classes='table table-stripped')\n\n\n# ### Mars Hemispheres\n# \n# * Visit the USGS Astrogeology site [here](https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars) to obtain high resolution images for each of Mar's hemispheres.\n# \n# * You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.\n# \n# * Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys `img_url` and `title`.\n# \n# * Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.\n\n# In[6]:\n\ndef mars_hemisphere(browser):\n browser.visit('https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars')\n\n\n # In[7]:\n\n\n links = browser.find_by_css('a.itemLink h3')\n\n\n # In[8]:\n\n\n hemispheres = []\n for i in range(len(links)):\n hemisphere = {}\n \n hemisphere['title'] = browser.find_by_css('a.itemLink h3')[i].text\n browser.find_by_css('a.itemLink h3')[i].click()\n hemisphere['url'] = browser.find_by_text('Sample')['href']\n hemispheres.append(hemisphere)\n browser.back()\n browser.quit()\n return hemispheres\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.read_html"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xssstory/cogdl
|
[
"ae8de495c365993f19f04774f083960fd282c2a3",
"ae8de495c365993f19f04774f083960fd282c2a3",
"ae8de495c365993f19f04774f083960fd282c2a3",
"ae8de495c365993f19f04774f083960fd282c2a3",
"ae8de495c365993f19f04774f083960fd282c2a3"
] |
[
"cogdl/tasks/node_classification.py",
"examples/custom_dataset.py",
"cogdl/tasks/pretrain.py",
"cogdl/models/emb/gatne.py",
"cogdl/models/nn/pyg_unsup_graphsage.py"
] |
[
"import argparse\nimport copy\nfrom typing import Optional\nimport scipy.sparse as sp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.models.supervised_model import SupervisedHomogeneousNodeClassificationModel\nfrom cogdl.trainers.supervised_trainer import (\n SupervisedHomogeneousNodeClassificationTrainer,\n)\nfrom cogdl.trainers.sampled_trainer import SAINTTrainer\n\nfrom . import BaseTask, register_task\n\n\ndef normalize_adj_row(adj):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(adj.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(adj)\n return mx\n\n\ndef to_torch_sparse(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\ndef row_l1_normalize(X):\n norm = 1e-6 + X.sum(dim=1, keepdim=True)\n return X/norm\n\n\ndef preprocess_data_sgcpn(data, normalize_feature=True, missing_rate=0):\n data.train_mask = data.train_mask.type(torch.bool)\n data.val_mask = data.val_mask.type(torch.bool)\n # expand test_mask to all rest nodes\n data.test_mask = ~(data.train_mask + data.val_mask)\n # get adjacency matrix\n n = len(data.x)\n adj = sp.csr_matrix((np.ones(data.edge_index.shape[1]), data.edge_index), shape=(n, n))\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) + sp.eye(adj.shape[0])\n adj = normalize_adj_row(adj)\n data.adj = to_torch_sparse(adj).to_dense()\n if normalize_feature:\n data.x = row_l1_normalize(data.x)\n erasing_pool = torch.arange(n)[~data.train_mask]\n size = int(len(erasing_pool) * (missing_rate / 100))\n idx_erased = np.random.choice(erasing_pool, size=size, replace=False)\n if missing_rate > 0:\n data.x[idx_erased] = 0\n return data\n\n\n@register_task(\"node_classification\")\nclass NodeClassification(BaseTask):\n \"\"\"Node classification task.\"\"\"\n\n @staticmethod\n def add_args(parser: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--missing-rate\", type=int, default=-1)\n # fmt: on\n\n def __init__(\n self,\n args,\n dataset=None,\n model: Optional[SupervisedHomogeneousNodeClassificationModel] = None,\n ):\n super(NodeClassification, self).__init__(args)\n\n self.args = args\n self.model_name = args.model\n self.device = args.device_id[0] if not args.cpu else \"cpu\"\n dataset = build_dataset(args) if dataset is None else dataset\n if args.missing_rate >= 0:\n if args.model == 'sgcpn':\n assert args.dataset in ['cora', 'citeseer', 'pubmed']\n dataset.data = preprocess_data_sgcpn(dataset.data, normalize_feature=True, missing_rate=0)\n adj_slice = torch.tensor(dataset.data.adj.size())\n adj_slice[0] = 0\n dataset.slices['adj'] = adj_slice\n\n self.dataset = dataset\n self.data = dataset[0]\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n args.num_nodes = dataset.data.x.shape[0]\n\n self.model: SupervisedHomogeneousNodeClassificationModel = build_model(args) if model is None else model\n self.model.set_device(self.device)\n\n self.trainer: Optional[\n SupervisedHomogeneousNodeClassificationTrainer\n 
] = self.model.get_trainer(NodeClassification, self.args)(\n self.args\n ) if self.model.get_trainer(\n NodeClassification, self.args\n ) else None\n\n if not self.trainer:\n self.optimizer = torch.optim.Adam(\n self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay\n ) if not hasattr(self.model, \"get_optimizer\") else self.model.get_optimizer(args)\n self.data.apply(lambda x: x.to(self.device))\n self.model: SupervisedHomogeneousNodeClassificationModel = self.model.to(\n self.device\n )\n self.patience = args.patience\n self.max_epoch = args.max_epoch\n\n def train(self):\n if self.trainer:\n if isinstance(self.trainer, SAINTTrainer):\n self.model = self.trainer.fit(self.model, self.dataset)\n self.data.apply(lambda x: x.to(self.device))\n else:\n result = self.trainer.fit(self.model, self.dataset)\n if issubclass(type(result), torch.nn.Module):\n self.model = result\n else:\n return result\n else:\n epoch_iter = tqdm(range(self.max_epoch))\n patience = 0\n best_score = 0\n best_loss = np.inf\n max_score = 0\n min_loss = np.inf\n best_model = copy.deepcopy(self.model)\n for epoch in epoch_iter:\n self._train_step()\n train_acc, _ = self._test_step(split=\"train\")\n val_acc, val_loss = self._test_step(split=\"val\")\n epoch_iter.set_description(\n f\"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}\"\n )\n if val_loss <= min_loss or val_acc >= max_score:\n if val_loss <= best_loss: # and val_acc >= best_score:\n best_loss = val_loss\n best_score = val_acc\n best_model = copy.deepcopy(self.model)\n min_loss = np.min((min_loss, val_loss))\n max_score = np.max((max_score, val_acc))\n patience = 0\n else:\n patience += 1\n if patience == self.patience:\n epoch_iter.close()\n break\n print(f\"Valid accurracy = {best_score}\")\n self.model = best_model\n test_acc, _ = self._test_step(split=\"test\")\n val_acc, _ = self._test_step(split=\"val\")\n print(f\"Test accuracy = {test_acc}\")\n return dict(Acc=test_acc, ValAcc=val_acc)\n\n def _train_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n self.model.loss(self.data).backward()\n self.optimizer.step()\n\n def _test_step(self, split=\"val\", logits=None):\n self.model.eval()\n logits = logits if logits else self.model.predict(self.data)\n if split == \"train\":\n mask = self.data.train_mask\n elif split == \"val\":\n mask = self.data.val_mask\n else:\n mask = self.data.test_mask\n loss = F.nll_loss(logits[mask], self.data.y[mask]).item()\n\n pred = logits[mask].max(1)[1]\n acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()\n return acc, loss\n",
"from cogdl.data.data import Data\nimport torch\n\nfrom cogdl.tasks import build_task\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\nfrom cogdl.data import Dataset\n\n\n\"\"\"Define your data\"\"\"\nclass MyData(object):\n def __init__(self):\n num_nodes = 100\n num_edges = 300\n feat_dim = 30\n # load or generate data\n self.edge_index = torch.randint(0, num_nodes, (2, num_edges))\n self.x = torch.randn(num_nodes, feat_dim)\n self.y = torch.randint(0, 2, (num_nodes,))\n\n # set train/val/test mask in node_classification task\n self.train_mask = torch.zeros(num_nodes).bool()\n self.train_mask[0:int(0.3*num_nodes)] = True\n self.val_mask = torch.zeros(num_nodes).bool()\n self.val_mask[int(0.3*num_nodes):int(0.7*num_nodes)] = True\n self.test_mask = torch.zeros(num_nodes).bool()\n self.test_mask[int(0.7*num_nodes):] = True\n\n def apply(self, func):\n for name, value in vars(self).items():\n setattr(self, name, func(value))\n\n @property\n def num_features(self):\n return self.x.shape[1]\n \n @property\n def num_classes(self):\n return int(torch.max(self.y)) + 1\n\n\n\"\"\"Define your dataset\"\"\"\nclass MyDataset(object):\n def __init__(self, datalist):\n self.datalist = datalist\n self.data = self.datalist[0]\n self.num_features = self.datalist[0].num_features\n self.num_classes = self.datalist[0].num_classes\n\n def __getitem__(self, index):\n assert index == 0\n return self.datalist[index]\n\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {'hidden_size': 16,\n 'dropout': 0.5,\n 'patience': 100,\n 'max_epoch': 500,\n 'cpu': not cuda_available,\n 'lr': 0.01,\n 'device_id': [0],\n 'weight_decay': 5e-4}\n return build_args_from_dict(default_dict)\n\n\ndef main_dataset():\n args = get_default_args()\n args.task = \"node_classification\"\n args.model = \"gcn\"\n # use customized dataset\n mydata = MyData()\n dataset = MyDataset([mydata])\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n # use model in cogdl\n model = build_model(args)\n task = build_task(args, dataset, model)\n result = task.train()\n print(result)\n\n\nif __name__ == \"__main__\":\n main_dataset()",
"import argparse\nimport torch\n\nfrom . import register_task, BaseTask\nfrom cogdl.models import build_model\n\n\n@register_task(\"pretrain\")\nclass PretrainTask(BaseTask):\n @staticmethod\n def add_args(_: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n # parser.add_argument(\"--num-features\", type=int)\n # fmt: on\n\n def __init__(self, args):\n super(PretrainTask, self).__init__(args)\n self.device = torch.device(\"cpu\" if args.cpu else \"cuda\")\n self.model = build_model(args)\n self.model = self.model.to(self.device)\n \n def train(self):\n return self.model.trainer.fit()\n",
"import numpy as np\nimport networkx as nx\nfrom collections import defaultdict\nfrom gensim.models.keyedvectors import Vocab\nfrom six import iteritems\nimport random\nimport math\nimport tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\nfrom .. import BaseModel, register_model\n\n\n@register_model(\"gatne\")\nclass GATNE(BaseModel):\n r\"\"\"The GATNE model from the `\"Representation Learning for Attributed Multiplex Heterogeneous Network\"\n <https://dl.acm.org/doi/10.1145/3292500.3330964>`_ paper\n\n Args:\n walk_length (int) : The walk length.\n walk_num (int) : The number of walks to sample for each node.\n window_size (int) : The actual context size which is considered in language model.\n worker (int) : The number of workers for word2vec.\n epoch (int) : The number of training epochs.\n batch_size (int) : The size of each training batch.\n edge_dim (int) : Number of edge embedding dimensions.\n att_dim (int) : Number of attention dimensions.\n negative_samples (int) : Negative samples for optimization.\n neighbor_samples (int) : Neighbor samples for aggregation\n schema (str) : The metapath schema used in model. Metapaths are splited with \",\", \n while each node type are connected with \"-\" in each metapath. For example:\"0-1-0,0-1-2-1-0\"\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--walk-length', type=int, default=10,\n help='Length of walk per source. Default is 10.')\n parser.add_argument('--walk-num', type=int, default=10,\n help='Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=5,\n help='Window size of skip-gram model. Default is 5.')\n parser.add_argument('--worker', type=int, default=10,\n help='Number of parallel workers. Default is 10.')\n parser.add_argument('--epoch', type=int, default=20,\n help='Number of epoch. Default is 20.')\n parser.add_argument('--batch-size', type=int, default=256,\n help='Number of batch_size. Default is 256.')\n parser.add_argument('--edge-dim', type=int, default=10,\n help='Number of edge embedding dimensions. Default is 10.')\n parser.add_argument('--att-dim', type=int, default=20,\n help='Number of attention dimensions. Default is 20.')\n parser.add_argument('--negative-samples', type=int, default=5,\n help='Negative samples for optimization. Default is 5.')\n parser.add_argument('--neighbor-samples', type=int, default=10,\n help='Neighbor samples for aggregation. 
Default is 10.')\n parser.add_argument('--schema', type=str, default=None,\n help=\"Input schema for metapath random walk.\")\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.hidden_size,\n args.walk_length,\n args.walk_num,\n args.window_size,\n args.worker,\n args.epoch,\n args.batch_size,\n args.edge_dim,\n args.att_dim,\n args.negative_samples,\n args.neighbor_samples,\n args.schema,\n )\n\n def __init__(\n self,\n dimension,\n walk_length,\n walk_num,\n window_size,\n worker,\n epoch,\n batch_size,\n edge_dim,\n att_dim,\n negative_samples,\n neighbor_samples,\n schema,\n ):\n super(GATNE, self).__init__()\n self.embedding_size = dimension\n self.walk_length = walk_length\n self.walk_num = walk_num\n self.window_size = window_size\n self.worker = worker\n self.epochs = epoch\n self.batch_size = batch_size\n self.embedding_u_size = edge_dim\n self.dim_att = att_dim\n self.num_sampled = negative_samples\n self.neighbor_samples = neighbor_samples\n self.schema = schema\n\n self.multiplicity = True\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def train(self, network_data):\n all_walks = generate_walks(network_data, self.walk_num, self.walk_length, schema=self.schema)\n vocab, index2word = generate_vocab(all_walks)\n train_pairs = generate_pairs(all_walks, vocab)\n\n edge_types = list(network_data.keys())\n\n num_nodes = len(index2word)\n edge_type_count = len(edge_types)\n\n epochs = self.epochs\n batch_size = self.batch_size\n embedding_size = self.embedding_size\n embedding_u_size = self.embedding_u_size\n num_sampled = self.num_sampled\n dim_att = self.dim_att\n neighbor_samples = self.neighbor_samples\n\n neighbors = [[[] for __ in range(edge_type_count)] for _ in range(num_nodes)]\n for r in range(edge_type_count):\n g = network_data[edge_types[r]]\n for (x, y) in g:\n ix = vocab[x].index\n iy = vocab[y].index\n neighbors[ix][r].append(iy)\n neighbors[iy][r].append(ix)\n for i in range(num_nodes):\n if len(neighbors[i][r]) == 0:\n neighbors[i][r] = [i] * neighbor_samples\n elif len(neighbors[i][r]) < neighbor_samples:\n neighbors[i][r].extend(\n list(\n np.random.choice(\n neighbors[i][r],\n size=neighbor_samples - len(neighbors[i][r]),\n )\n )\n )\n elif len(neighbors[i][r]) > neighbor_samples:\n neighbors[i][r] = list(\n np.random.choice(neighbors[i][r], size=neighbor_samples)\n )\n\n model = GATNEModel(\n num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_att\n )\n nsloss = NSLoss(num_nodes, num_sampled, embedding_size)\n\n model.to(self.device)\n nsloss.to(self.device)\n\n optimizer = torch.optim.Adam(\n [{\"params\": model.parameters()}, {\"params\": nsloss.parameters()}], lr=1e-4\n )\n\n for epoch in range(epochs):\n random.shuffle(train_pairs)\n batches = get_batches(train_pairs, neighbors, batch_size)\n\n data_iter = tqdm.tqdm(\n batches,\n desc=\"epoch %d\" % (epoch),\n total=(len(train_pairs) + (batch_size - 1)) // batch_size,\n bar_format=\"{l_bar}{r_bar}\",\n )\n avg_loss = 0.0\n\n for i, data in enumerate(data_iter):\n optimizer.zero_grad()\n embs = model(\n data[0].to(self.device),\n data[2].to(self.device),\n data[3].to(self.device),\n )\n loss = nsloss(data[0].to(self.device), embs, data[1].to(self.device))\n loss.backward()\n optimizer.step()\n\n avg_loss += loss.item()\n\n if i % 5000 == 0:\n post_fix = {\n \"epoch\": epoch,\n \"iter\": i,\n \"avg_loss\": avg_loss / (i + 1),\n \"loss\": loss.item(),\n }\n data_iter.write(str(post_fix))\n\n final_model = 
dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))\n for i in range(num_nodes):\n train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(\n self.device\n )\n train_types = torch.tensor(list(range(edge_type_count))).to(self.device)\n node_neigh = torch.tensor(\n [neighbors[i] for _ in range(edge_type_count)]\n ).to(self.device)\n node_emb = model(train_inputs, train_types, node_neigh)\n for j in range(edge_type_count):\n final_model[edge_types[j]][index2word[i]] = (\n node_emb[j].cpu().detach().numpy()\n )\n return final_model\n\n\nclass GATNEModel(nn.Module):\n def __init__(\n self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a\n ):\n super(GATNEModel, self).__init__()\n self.num_nodes = num_nodes\n self.embedding_size = embedding_size\n self.embedding_u_size = embedding_u_size\n self.edge_type_count = edge_type_count\n self.dim_a = dim_a\n\n self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))\n self.node_type_embeddings = Parameter(\n torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)\n )\n self.trans_weights = Parameter(\n torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)\n )\n self.trans_weights_s1 = Parameter(\n torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)\n )\n self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.node_embeddings.data.uniform_(-1.0, 1.0)\n self.node_type_embeddings.data.uniform_(-1.0, 1.0)\n self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n\n def forward(self, train_inputs, train_types, node_neigh):\n node_embed = self.node_embeddings[train_inputs]\n node_embed_neighbors = self.node_type_embeddings[node_neigh]\n node_embed_tmp = torch.cat(\n [\n node_embed_neighbors[:, i, :, i, :].unsqueeze(1)\n for i in range(self.edge_type_count)\n ],\n dim=1,\n )\n node_type_embed = torch.sum(node_embed_tmp, dim=2)\n\n trans_w = self.trans_weights[train_types]\n trans_w_s1 = self.trans_weights_s1[train_types]\n trans_w_s2 = self.trans_weights_s2[train_types]\n\n attention = F.softmax(\n torch.matmul(\n F.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2\n ).squeeze()\n ).unsqueeze(1)\n node_type_embed = torch.matmul(attention, node_type_embed)\n node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze()\n\n last_node_embed = F.normalize(node_embed, dim=1)\n\n return last_node_embed\n\n\nclass NSLoss(nn.Module):\n def __init__(self, num_nodes, num_sampled, embedding_size):\n super(NSLoss, self).__init__()\n self.num_nodes = num_nodes\n self.num_sampled = num_sampled\n self.embedding_size = embedding_size\n self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))\n self.sample_weights = F.normalize(\n torch.Tensor(\n [\n (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)\n for k in range(num_nodes)\n ]\n ),\n dim=0,\n )\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n\n def forward(self, input, embs, label):\n n = input.shape[0]\n log_target = torch.log(\n torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))\n )\n negs = torch.multinomial(\n self.sample_weights, self.num_sampled * n, replacement=True\n ).view(n, self.num_sampled)\n noise = 
torch.neg(self.weights[negs])\n sum_log_sampled = torch.sum(\n torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1\n ).squeeze()\n\n loss = log_target + sum_log_sampled\n return -loss.sum() / n\n\n\nclass RWGraph:\n def __init__(self, nx_G, node_type=None):\n self.G = nx_G\n self.node_type = node_type\n\n def walk(self, walk_length, start, schema=None):\n # Simulate a random walk starting from start node.\n G = self.G\n\n rand = random.Random()\n\n if schema:\n schema_items = schema.split(\"-\")\n assert schema_items[0] == schema_items[-1]\n\n walk = [start]\n while len(walk) < walk_length:\n cur = walk[-1]\n candidates = []\n for node in G[cur].keys():\n if (\n schema == None\n or self.node_type[node]\n == schema_items[len(walk) % (len(schema_items) - 1)]\n ):\n candidates.append(node)\n if candidates:\n walk.append(rand.choice(candidates))\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length, schema=None):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n # print('Walk iteration:')\n if schema is not None:\n schema_list = schema.split(\",\")\n for walk_iter in range(num_walks):\n random.shuffle(nodes)\n for node in nodes:\n if schema is None:\n walks.append(self.walk(walk_length=walk_length, start=node))\n else:\n for schema_iter in schema_list:\n if schema_iter.split(\"-\")[0] == self.node_type[node]:\n walks.append(\n self.walk(\n walk_length=walk_length,\n start=node,\n schema=schema_iter,\n )\n )\n\n return walks\n\n\ndef get_G_from_edges(edges):\n edge_dict = dict()\n for edge in edges:\n edge_key = str(edge[0]) + \"_\" + str(edge[1])\n if edge_key not in edge_dict:\n edge_dict[edge_key] = 1\n else:\n edge_dict[edge_key] += 1\n tmp_G = nx.Graph()\n for edge_key in edge_dict:\n weight = edge_dict[edge_key]\n x = int(edge_key.split(\"_\")[0])\n y = int(edge_key.split(\"_\")[1])\n tmp_G.add_edge(x, y)\n tmp_G[x][y][\"weight\"] = weight\n return tmp_G\n\n\ndef generate_pairs(all_walks, vocab, window_size=5):\n pairs = []\n skip_window = window_size // 2\n for layer_id, walks in enumerate(all_walks):\n for walk in walks:\n for i in range(len(walk)):\n for j in range(1, skip_window + 1):\n if i - j >= 0:\n pairs.append(\n (vocab[walk[i]].index, vocab[walk[i - j]].index, layer_id)\n )\n if i + j < len(walk):\n pairs.append(\n (vocab[walk[i]].index, vocab[walk[i + j]].index, layer_id)\n )\n return pairs\n\n\ndef generate_vocab(all_walks):\n index2word = []\n raw_vocab = defaultdict(int)\n\n for walks in all_walks:\n for walk in walks:\n for word in walk:\n raw_vocab[word] += 1\n\n vocab = {}\n for word, v in iteritems(raw_vocab):\n vocab[word] = Vocab(count=v, index=len(index2word))\n index2word.append(word)\n\n index2word.sort(key=lambda word: vocab[word].count, reverse=True)\n for i, word in enumerate(index2word):\n vocab[word].index = i\n\n return vocab, index2word\n\n\ndef get_batches(pairs, neighbors, batch_size):\n n_batches = (len(pairs) + (batch_size - 1)) // batch_size\n\n # result = []\n for idx in range(n_batches):\n x, y, t, neigh = [], [], [], []\n for i in range(batch_size):\n index = idx * batch_size + i\n if index >= len(pairs):\n break\n x.append(pairs[index][0])\n y.append(pairs[index][1])\n t.append(pairs[index][2])\n neigh.append(neighbors[pairs[index][0]])\n yield torch.tensor(x), torch.tensor(y), torch.tensor(t), torch.tensor(neigh)\n\n\ndef generate_walks(network_data, num_walks, walk_length, schema=None):\n if schema is not None:\n # TODO: node_type = load_node_type(file_name + '/node_type.txt')\n pass\n else:\n node_type 
= None\n\n all_walks = []\n for layer_id in network_data:\n tmp_data = network_data[layer_id]\n # start to do the random walk on a layer\n\n layer_walker = RWGraph(get_G_from_edges(tmp_data))\n layer_walks = layer_walker.simulate_walks(num_walks, walk_length, schema=schema)\n\n all_walks.append(layer_walks)\n\n return all_walks\n",
"import numpy as np\nimport tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_cluster import random_walk\n\nfrom .dgi import LogRegTrainer\nfrom .. import register_model, BaseModel\nfrom cogdl.models.nn.graphsage import sage_sampler, GraphSAGELayer\n\n\nclass SAGE(nn.Module):\n \"\"\"\n Implementation of unsupervised GraphSAGE in paper `\"Inductive Representation Learning on Large Graphs\"` <https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf>\n\n Parameters\n ----------\n num_features : int\n Size of each input sample\n hidden_size : int\n num_layers : int\n The number of GNN layers.\n samples_size : list\n The number sampled neighbors of different orders \n dropout : float\n walk_length : int\n The length of random walk\n negative_samples : int\n \"\"\"\n def __init__(\n self, num_features, hidden_size, num_layers, sample_size, dropout, walk_length, negative_samples\n ):\n super(SAGE, self).__init__()\n self.adjlist = {}\n self.num_features = num_features\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.sample_size = sample_size\n self.dropout = dropout\n self.walk_length = walk_length\n self.num_negative_samples = negative_samples\n self.walk_res = None\n self.num_nodes = None\n self.negative_samples = None\n\n shapes = [num_features] + [hidden_size] * num_layers\n\n self.convs = nn.ModuleList(\n [\n GraphSAGELayer(shapes[layer], shapes[layer+1])\n for layer in range(num_layers)\n ]\n )\n\n def forward(self, x, edge_index):\n for i in range(self.num_layers):\n edge_index_sp = self.sampling(edge_index, self.sample_size[i]).to(x.device)\n x = self.convs[i](x, edge_index_sp)\n if i != self.num_layers - 1:\n x = F.relu(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x\n\n def loss(self, data):\n x = self.forward(data.x, data.edge_index)\n device = x.device\n # if self.walk_res is None:\n self.walk_res = random_walk(data.edge_index[0], data.edge_index[1],\n start=torch.arange(0, x.shape[0]).to(device),\n walk_length=self.walk_length)[:, 1:]\n\n if not self.num_nodes:\n self.num_nodes = int(torch.max(data.edge_index)) + 1\n\n # if self.negative_samples is None:\n self.negative_samples = torch.from_numpy(\n np.random.choice(self.num_nodes, (self.num_nodes, self.num_negative_samples))\n ).to(device)\n\n pos_loss = -torch.log(\n torch.sigmoid(\n torch.sum(x.unsqueeze(1).repeat(1, self.walk_length, 1) * x[self.walk_res], dim=-1)\n )\n ).mean()\n neg_loss = -torch.log(\n torch.sigmoid(-torch.sum(x.unsqueeze(1).repeat(1, self.num_negative_samples, 1) * x[self.negative_samples], dim=-1))\n ).mean()\n return (pos_loss + neg_loss)/2\n\n def embed(self, data):\n emb = self.forward(data.x, data.edge_index)\n return emb\n \n def sampling(self, edge_index, num_sample):\n return sage_sampler(self.adjlist, edge_index, num_sample)\n\n\n\n@register_model(\"unsup_graphsage\")\nclass Graphsage(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--num-features\", type=int)\n parser.add_argument(\"--hidden-size\", type=int, default=128)\n parser.add_argument(\"--num-layers\", type=int, default=2)\n parser.add_argument(\"--sample-size\", type=int, nargs='+', default=[10, 10])\n parser.add_argument(\"--dropout\", type=float, default=0.5)\n parser.add_argument(\"--walk-length\", type=int, default=10)\n parser.add_argument(\"--negative-samples\", type=int, default=30)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n\n 
parser.add_argument(\"--max-epochs\", type=int, default=3000)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_features,\n args.hidden_size,\n args.num_classes,\n args.num_layers,\n args.sample_size,\n args.dropout,\n args.walk_length,\n args.negative_samples,\n args.lr,\n args.max_epochs,\n args.patience,\n )\n\n def __init__(\n self, num_features, hidden_size, num_classes, num_layers,\n sample_size, dropout, walk_length, negative_samples, lr, epochs, patience\n ):\n super(Graphsage, self).__init__()\n self.model = SAGE(num_features, hidden_size, num_layers, sample_size, dropout, walk_length, negative_samples)\n self.epochs = epochs\n self.patience = patience\n self.lr = lr\n self.nhid = hidden_size\n self.nclass = num_classes\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n def train(self, data):\n data.apply(lambda x: x.to(self.device))\n self.model.to(self.device)\n device = data.x.device\n best = 1e9\n cnt_wait = 0\n optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=0.0)\n\n epoch_iter = tqdm.tqdm(range(self.epochs))\n for epoch in epoch_iter:\n self.model.train()\n optimizer.zero_grad()\n\n loss = self.model.loss(data)\n epoch_iter.set_description(f'Epoch: {epoch:03d}, Loss: {loss.item():.4f}')\n\n if loss < best:\n best = loss\n best_t = epoch\n cnt_wait = 0\n else:\n cnt_wait += 1\n\n if cnt_wait == self.patience:\n print('Early stopping!')\n break\n\n loss.backward()\n optimizer.step()\n self.model.eval()\n embeds = self.model.embed(data).detach()\n\n opt = {\n \"idx_train\": data.train_mask,\n \"idx_val\": data.val_mask,\n \"idx_test\": data.test_mask,\n \"num_classes\": self.nclass\n }\n result = LogRegTrainer().train(embeds, data.y, opt)\n return result\n\n"
] |
[
[
"torch.Size",
"torch.nn.functional.nll_loss",
"numpy.random.choice",
"scipy.sparse.eye",
"numpy.power",
"numpy.min",
"scipy.sparse.diags",
"torch.from_numpy",
"numpy.ones",
"numpy.max",
"torch.arange",
"torch.sparse.FloatTensor",
"numpy.isinf",
"numpy.vstack"
],
[
"torch.randint",
"torch.max",
"torch.zeros",
"torch.randn",
"torch.cuda.is_available"
],
[
"torch.device"
],
[
"torch.nn.functional.normalize",
"numpy.random.choice",
"torch.sum",
"torch.neg",
"torch.multinomial",
"torch.tensor",
"torch.matmul",
"torch.mul",
"torch.FloatTensor",
"torch.cuda.is_available"
],
[
"torch.max",
"numpy.random.choice",
"torch.nn.functional.dropout",
"torch.nn.functional.relu",
"torch.cuda.is_available",
"torch.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samuelsmal/drosophVAE
|
[
"4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7",
"4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7"
] |
[
"drosoph_vae/settings/data.py",
"drosoph_vae/settings/skeleton.py"
] |
[
"from enum import Enum\nfrom collections import namedtuple\nimport json\nimport pickle\nfrom datetime import datetime\nimport pathlib\nimport numpy as np\n\nclass Behavior(Enum):\n WALK_FORW = 0\n WALK_BACKW = 1\n PUSH_BALL = 2\n REST = 3\n GROOM_FLEG = 4\n GROOM_ANT = 5\n NONE = 6\n\nExperiment = namedtuple('Experiment', 'study_id, fly_id, experiment_id')\nLabelledSequence = namedtuple('LabelledSequence', ('sequence', 'label') + Experiment._fields)\n\n\ndef experiment_key(study_id=None, experiment_id=None, fly_id=None, obj=None):\n \"\"\"Exhibit A why duck typing is just shit sometimes\"\"\"\n\n if obj:\n return f\"{obj.study_id}-{obj.experiment_id}-{obj.fly_id}\"\n else:\n return f\"{study_id}-{experiment_id}-{fly_id}\"\n\n\n# They the ranges are half-open: [0, 14) in \"mathy\" writing\n_LABELLED_DATA_RAW_ = [\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((140, 460), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n ((750, 900), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '001_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((630, 800), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n ((790, 900), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '002_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n ((570, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '003_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '004_SG1'),\n\n (( 0, 140), Behavior.REST, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n ((140, 500), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n ((600, 750), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly6', '005_SG1'),\n\n (( 0, 150), Behavior.GROOM_FLEG, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n ((170, 350), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n ((450, 600), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '003_SG1'),\n\n (( 0, 150), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n ((180, 350), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n ((400, 580), Behavior.REST, '180921_aDN_CsCh', 'Fly6', '001_SG1'),\n\n ((250, 600), Behavior.WALK_BACKW, '180918_MDN_CsCh', 'Fly2', '004_SG1'),\n\n ((190, 300), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly4', '003_SG1'),\n\n ((400, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '003_SG1'),\n\n (( 0, 500), Behavior.REST, '180918_MDN_PR', 'Fly1', '004_SG1'),\n ((650, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '004_SG1'),\n\n (( 0, 500), Behavior.REST, '180918_MDN_PR', 'Fly1', '005_SG1'),\n ((500, 900), Behavior.WALK_FORW, '180918_MDN_PR', 'Fly1', '005_SG1'),\n\n (( 0, 100), Behavior.PUSH_BALL, '180918_MDN_PR', 'Fly2', '001_SG1'),\n ((350, 500), Behavior.GROOM_FLEG, '180918_MDN_PR', 'Fly2', '002_SG1'),\n ((400, 530), Behavior.GROOM_FLEG, '180918_MDN_PR', 'Fly2', '003_SG1'),\n\n ((150, 230), Behavior.GROOM_ANT, '180921_aDN_CsCh', 'Fly3', '001_SG1'),\n\n #((170, 210), Behavior.WALK_BACKW, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n #((210, 600), Behavior.WALK_FORW, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n #((600, 700), Behavior.PUSH_BALL, '180919_MDN_CsCh', 'Fly4', '005_SG1'),\n\n #((600, 700), Behavior.PUSH_BALL, 
'180919_MDN_CsCh', 'Fly4', '005_SG1'),\n\n (( 0, 145), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((145, 225), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((225, 671), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((671, 683), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((683, 761), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((761, 778), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((778, 809), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((809, 813), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((813, 820), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((820, 861), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((861, 868), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((868, 879), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n ((879, 900), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '001_SG1'),\n\n (( 0, 143), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((143, 254), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((254, 822), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n ((822, 900), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '002_SG1'),\n\n (( 0, 145), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((145, 247), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((247, 653), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((653, 785), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((785, 803), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((803, 820), Behavior.NONE, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((820, 859), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n ((859, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '003_SG1'),\n\n (( 0, 147), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((147, 235), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((235, 657), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((657, 816), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((816, 820), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n ((820, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '004_SG1'),\n\n (( 0, 144), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((144, 226), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((226, 239), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((239, 253), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((253, 267), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((267, 278), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((278, 656), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((656, 659), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((659, 665), Behavior.GROOM_ANT, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((665, 757), Behavior.WALK_FORW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((757, 768), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((768, 799), Behavior.WALK_BACKW, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n ((799, 900), Behavior.REST, '180920_aDN_CsCh', 'Fly2', '005_SG1'),\n]\n\ndef dummy_data_complex_sine_like(length):\n DummyBehaviour = namedtuple('DummyBehaviour', 'type amplitude fraction frequency')\n # make sure that the fractions add up to 1.\n # cluster id, behaviour\n _dummy_behaviours_ = [\n (0, ('sinoid', 1.0, 0.1, 2)),\n (1, ('flat', 0.0, 0.2, 0)),\n (2, ('sinoid', 1.0, 0.2, 3)),\n (3, ('sinoid', 1.0, 
0.1, 5)),\n (4, ('flat', 1.0, 0.2, 0)),\n (2, ('sinoid', .5, .2, 3)),\n ]\n\n\n cur_idx = 0\n nb_frames = length\n\n _new_frames_ = np.zeros(nb_frames)\n _cluster_assignments_ = np.zeros(nb_frames)\n\n for l, db in _dummy_behaviours_:\n db = DummyBehaviour(*db)\n cur_idx_end = np.int(nb_frames * db.fraction + cur_idx)\n idx = np.s_[cur_idx:cur_idx_end]\n if db.type == 'sinoid':\n _new_frames_[idx] = db.amplitude * np.sin(np.pi * np.linspace(0, 2, cur_idx_end - cur_idx) * db.frequency)\n elif db.type == 'flat':\n _new_frames_[idx] = db.amplitude\n\n _cluster_assignments_[idx] = l\n\n cur_idx = cur_idx_end\n\n return _new_frames_, _cluster_assignments_\n\n\nLABELLED_SEQUENCES = [LabelledSequence._make(i) for i in _LABELLED_DATA_RAW_]\nEXPERIMENTS = list(set(Experiment(study_id=l.study_id, fly_id=l.fly_id,\n experiment_id=l.experiment_id) \\\n for l in LABELLED_SEQUENCES))\n",
"from enum import Enum\n\nimport numpy as np\n\nnum_cameras = 7\n\n\nclass Tracked(Enum):\n BODY_COXA = 0\n COXA_FEMUR = 1\n FEMUR_TIBIA = 2\n TIBIA_TARSUS = 3\n TARSUS_TIP = 4\n ANTENNA = 5\n STRIPE = 6\n\n\ntracked_points = [Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.ANTENNA,\n Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,\n Tracked.ANTENNA,\n Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE]\n\nlimb_id = [0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2,\n 3,\n 4, 4, 4,\n 5, 5, 5, 5, 5,\n 6, 6, 6, 6, 6,\n 7, 7, 7, 7, 7,\n 8,\n 9, 9, 9]\n\n__limb_visible_left = [True, True, True, True, True,\n False, False, False, False, False]\n\n__limb_visible_right = [False, False, False, False, False,\n True, True, True, True, True]\n\n__limb_visible_mid = [True, True, False, True, False,\n True, True, False, True, False]\n\nbones = [[0, 1], [1, 2], [2, 3], [3, 4],\n [5, 6], [6, 7], [7, 8], [8, 9],\n [10, 11], [11, 12], [12, 13], [13, 14],\n [16, 17], [17, 18],\n [19, 20], [20, 21], [21, 22], [22, 23],\n [24, 25], [25, 26], [26, 27], [27, 28],\n [29, 30], [30, 31], [31, 32], [32, 33],\n [35, 36], [36, 37]]\n\n# bones3d = [[15, 34], [15, 16], [34, 16]]\nbones3d = [[15, 34]]\n\ncolors = [(255, 0, 0),\n (0, 0, 255),\n (0, 255, 0),\n (150, 200, 200),\n (255, 165, 0),\n (255, 255, 0),\n (255, 0, 255),\n (0, 255, 255),\n (150, 200, 200),\n (255, 165, 0)]\n\nnum_joints = len(tracked_points)\nnum_limbs = len(set(limb_id))\n\n\ndef is_body_coxa(joint_id):\n return tracked_points[joint_id] == Tracked.BODY_COXA\n\n\ndef is_coxa_femur(joint_id):\n return tracked_points[joint_id] == Tracked.COXA_FEMUR\n\n\ndef is_femur_tibia(joint_id):\n return tracked_points[joint_id] == Tracked.FEMUR_TIBIA\n\n\ndef is_tibia_tarsus(joint_id):\n return tracked_points[joint_id] == Tracked.TIBIA_TARSUS\n\n\ndef is_antenna(joint_id):\n return tracked_points[joint_id] == Tracked.ANTENNA\n\n\ndef is_stripe(joint_id):\n return tracked_points[joint_id] == Tracked.STRIPE\n\n\ndef is_tarsus_tip(joint_id):\n return tracked_points[joint_id] == Tracked.TARSUS_TIP\n\n\ndef get_limb_id(joint_id):\n return limb_id[joint_id]\n\n\ndef is_joint_visible_left(joint_id):\n return __limb_visible_left[get_limb_id(joint_id)]\n\n\ndef is_joint_visible_right(joint_id):\n return __limb_visible_right[get_limb_id(joint_id)]\n\n\ndef is_limb_visible_left(limb_id):\n return __limb_visible_left[limb_id]\n\n\ndef is_limb_visible_right(limb_id):\n return __limb_visible_right[limb_id]\n\ndef is_limb_visible_mid(limb_id):\n return __limb_visible_mid[limb_id]\n\ndef camera_see_limb(camera_id, limb_id):\n if camera_id < 3:\n return is_limb_visible_left(limb_id)\n elif camera_id==3:\n return is_limb_visible_mid(limb_id)\n elif camera_id > 3:\n return is_limb_visible_right(limb_id)\n else:\n raise NotImplementedError\n\ndef camera_see_joint(camera_id, joint_id):\n if camera_id in [2, 4]: # they cannot see the stripes\n return camera_see_limb(camera_id, limb_id[joint_id]) and not 
(tracked_points[joint_id]==Tracked.STRIPE and not (limb_id[joint_id] not in [2, 6]))\n elif camera_id == 3:\n return camera_see_limb(camera_id, limb_id[joint_id]) and tracked_points[joint_id] != Tracked.BODY_COXA\n else:\n return camera_see_limb(camera_id, limb_id[joint_id])\n\n\n\nbone_param = np.ones((num_joints, 2), dtype=float)\nbone_param[:, 0] = 0.85\nbone_param[:, 1] = 0.2\nfor joint_id in range(num_joints):\n if is_body_coxa(joint_id) or is_stripe(joint_id) or is_antenna(joint_id):\n bone_param[joint_id, 1] = 10000 # no bone\n\nignore_joint_id = [joint_id for joint_id in\n range(num_joints) if\n is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]\n\nignore_joint_id_wo_stripe = [joint_id for joint_id in\n range(num_joints) if\n is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]\n"
] |
[
[
"numpy.int",
"numpy.zeros",
"numpy.linspace"
],
[
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sebastian-lapuschkin/Quantus
|
[
"c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f"
] |
[
"quantus/metrics/randomisation_metrics.py"
] |
[
"\"\"\"This module contains the collection of randomisation metrics to evaluate attribution-based explanations of neural network models.\"\"\"\nimport random\nimport warnings\nfrom typing import Callable, Dict, List, Union\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .base import Metric\nfrom ..helpers import asserts\nfrom ..helpers import utils\nfrom ..helpers import warn_func\nfrom ..helpers.asserts import attributes_check\nfrom ..helpers.model_interface import ModelInterface\nfrom ..helpers.normalise_func import normalise_by_negative\nfrom ..helpers.similar_func import correlation_spearman, ssim\n\n\nclass ModelParameterRandomisation(Metric):\n \"\"\"\n Implementation of the Model Parameter Randomization Method by Adebayo et. al., 2018.\n\n The Model Parameter Randomization measures the distance between the original attribution and a newly computed\n attribution throughout the process of cascadingly/independently randomizing the model parameters of one layer\n at a time.\n\n References:\n 1) Adebayo, J., Gilmer, J., Muelly, M., Goodfellow, I., Hardt, M., and Kim, B. \"Sanity Checks for Saliency Maps.\"\n arXiv preprint, arXiv:1810.073292v3 (2018)\n\n Assumptions:\n In the original paper multiple distance measures are taken: Spearman rank correlation (with and without abs),\n HOG and SSIM. We have set Spearman as the default value.\n \"\"\"\n\n @attributes_check\n def __init__(self, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n abs (boolean): Indicates whether absolute operation is applied on the attribution, default=True.\n normalise (boolean): Indicates whether normalise operation is applied on the attribution, default=True.\n normalise_func (callable): Attribution normalisation function applied in case normalise=True,\n default=normalise_by_negative.\n default_plot_func (callable): Callable that plots the metrics result.\n disable_warnings (boolean): Indicates whether the warnings are printed, default=False.\n display_progressbar (boolean): Indicates whether a tqdm-progress-bar is printed, default=False.\n similarity_func (callable): Similarity function applied to compare input and perturbed input,\n default=correlation_spearman.\n layer_order (string): Indicated whether the model is randomized cascadingly or independently.\n Set order=top_down for cascading randomization, set order=independent for independent randomization,\n default=\"independent\".\n \"\"\"\n super().__init__()\n\n self.args = args\n self.kwargs = kwargs\n self.abs = self.kwargs.get(\"abs\", True)\n self.normalise = self.kwargs.get(\"normalise\", True)\n self.normalise_func = self.kwargs.get(\"normalise_func\", normalise_by_negative)\n self.default_plot_func = Callable\n self.disable_warnings = self.kwargs.get(\"disable_warnings\", False)\n self.display_progressbar = self.kwargs.get(\"display_progressbar\", False)\n self.similarity_func = self.kwargs.get(\"similarity_func\", correlation_spearman)\n self.layer_order = kwargs.get(\"layer_order\", \"independent\")\n self.seed = self.kwargs.get(\"seed\", 42)\n self.last_results = {}\n self.all_results = []\n\n # Asserts and warnings.\n asserts.assert_layer_order(layer_order=self.layer_order)\n if not self.disable_warnings:\n warn_func.warn_parameterisation(\n metric_name=self.__class__.__name__,\n sensitive_params=(\n \"similarity metric 'similarity_func' and the order of \"\n \"the layer randomisation 'layer_order'\"\n ),\n citation=(\n \"Adebayo, J., Gilmer, J., Muelly, M., 
Goodfellow, I., Hardt, M., and Kim, B. \"\n \"'Sanity Checks for Saliency Maps.' arXiv preprint,\"\n \" arXiv:1810.073292v3 (2018)\"\n ),\n )\n warn_func.warn_attributions(normalise=self.normalise, abs=self.abs)\n\n def __call__(\n self,\n model: ModelInterface,\n x_batch: np.array,\n y_batch: np.array,\n a_batch: Union[np.array, None],\n *args,\n **kwargs\n ) -> List[float]:\n \"\"\"\n This implementation represents the main logic of the metric and makes the class object callable.\n It completes batch-wise evaluation of some explanations (a_batch) with respect to some input data\n (x_batch), some output labels (y_batch) and a torch model (model).\n\n Parameters\n model: a torch model e.g., torchvision.models that is subject to explanation\n x_batch: a np.ndarray which contains the input data that are explained\n y_batch: a np.ndarray which contains the output labels that are explained\n a_batch: a Union[np.ndarray, None] which contains pre-computed attributions i.e., explanations\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n channel_first (boolean): Indicates of the image dimensions are channel first, or channel last.\n Inferred from the input shape by default.\n explain_func (callable): Callable generating attributions, default=Callable.\n\n Returns\n last_results: a list of float(s) with the evaluation outcome of concerned batch\n\n Examples\n # Enable GPU.\n >> device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Load a pre-trained LeNet classification model (architecture at quantus/helpers/models).\n >> model = LeNet()\n >> model.load_state_dict(torch.load(\"tutorials/assets/mnist\"))\n\n # Load MNIST datasets and make loaders.\n >> test_set = torchvision.datasets.MNIST(root='./sample_data', download=True)\n >> test_loader = torch.utils.data.DataLoader(test_set, batch_size=24)\n\n # Load a batch of inputs and outputs to use for XAI evaluation.\n >> x_batch, y_batch = iter(test_loader).next()\n >> x_batch, y_batch = x_batch.cpu().numpy(), y_batch.cpu().numpy()\n\n # Generate Saliency attributions of the test set batch of the test set.\n >> a_batch_saliency = Saliency(model).attribute(inputs=x_batch, target=y_batch, abs=True).sum(axis=1)\n >> a_batch_saliency = a_batch_saliency.cpu().numpy()\n\n # Initialise the metric and evaluate explanations by calling the metric instance.\n >> metric = ModelParameterRandomisation(abs=True, normalise=False)\n >> scores = metric(model=model, x_batch=x_batch, y_batch=y_batch, a_batch=a_batch_saliency, **{}}\n \"\"\"\n # Reshape input batch to channel first order:\n if \"channel_first\" in kwargs and isinstance(kwargs[\"channel_first\"], bool):\n channel_first = kwargs.pop(\"channel_first\")\n else:\n channel_first = utils.infer_channel_first(x_batch)\n x_batch_s = utils.make_channel_first(x_batch, channel_first)\n\n # Wrap the model into an interface\n if model:\n model = utils.get_wrapped_model(model, channel_first)\n\n # Update kwargs.\n self.kwargs = {\n **kwargs,\n **{k: v for k, v in self.__dict__.items() if k not in [\"args\", \"kwargs\"]},\n }\n if \"img_size\" in kwargs:\n warnings.warn(\n \"argument 'img_size' is deprecated and will be removed in future versions.\"\n )\n if \"nr_channels\" in kwargs:\n warnings.warn(\n \"argument 'nr_channels' is deprecated and will be removed in future versions.\"\n )\n\n self.last_results = {}\n\n # Get explanation function and make asserts.\n explain_func = self.kwargs.get(\"explain_func\", Callable)\n 
asserts.assert_explain_func(explain_func=explain_func)\n\n if a_batch is None:\n\n # Generate explanations.\n a_batch = explain_func(\n model=model.get_model(),\n inputs=x_batch,\n targets=y_batch,\n **self.kwargs,\n )\n a_batch = utils.expand_attribution_channel(a_batch, x_batch_s)\n\n # Asserts.\n asserts.assert_attributions(x_batch=x_batch_s, a_batch=a_batch)\n\n # Create progress bar if desired.\n # Due to the nested for-loops and the requirement of a single progressbar,\n # manual updating will be performed at the end of each inner iteration.\n if self.display_progressbar:\n n_layers = len(\n list(model.get_random_layer_generator(order=self.layer_order))\n )\n n_iterations = n_layers * len(a_batch)\n pbar = tqdm(total=n_iterations)\n\n for layer_name, random_layer_model in model.get_random_layer_generator(\n order=self.layer_order, seed=self.seed\n ):\n\n similarity_scores = []\n\n # Generate an explanation with perturbed model.\n a_perturbed = explain_func(\n model=random_layer_model, inputs=x_batch, targets=y_batch, **self.kwargs\n )\n\n for ix, (a, a_per) in enumerate(zip(a_batch, a_perturbed)):\n\n if self.abs:\n a = np.abs(a)\n a_per = np.abs(a_per)\n\n if self.normalise:\n a = self.normalise_func(a)\n a_per = self.normalise_func(a_per)\n\n # Compute distance measure.\n similarity = self.similarity_func(a_per.flatten(), a.flatten())\n\n similarity_scores.append(similarity)\n\n # Update progress bar if desired.\n if self.display_progressbar:\n pbar.update(1)\n\n # Save similarity scores in a dictionary.\n self.last_results[layer_name] = similarity_scores\n\n # Close progress bar if desired.\n if self.display_progressbar:\n pbar.close()\n\n self.all_results.append(self.last_results)\n\n return self.last_results\n\n\nclass RandomLogit(Metric):\n \"\"\"\n Implementation of the Random Logit Metric by Sixt et al., 2020.\n\n The Random Logit Metric computes the distance between the original explanation and a reference explanation of\n a randomly chosen non-target class.\n\n References:\n 1) Sixt, Leon, Granz, Maximilian, and Landgraf, Tim. 
\"When Explanations Lie: Why Many Modified BP\n Attributions Fail.\"arXiv preprint, arXiv:1912.09818v6 (2020)\n \"\"\"\n\n @attributes_check\n def __init__(self, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n abs (boolean): Indicates whether absolute operation is applied on the attribution, default=False.\n normalise (boolean): Indicates whether normalise operation is applied on the attribution, default=True.\n normalise_func (callable): Attribution normalisation function applied in case normalise=True,\n default=normalise_by_negative.\n default_plot_func (callable): Callable that plots the metrics result.\n disable_warnings (boolean): Indicates whether the warnings are printed, default=False.\n display_progressbar (boolean): Indicates whether a tqdm-progress-bar is printed, default=False.\n similarity_func (callable): Similarity function applied to compare input and perturbed input,\n default=ssim.\n num_classes (integer): Number of prediction classes in the input, default=1000.\n \"\"\"\n super().__init__()\n\n self.args = args\n self.kwargs = kwargs\n self.abs = self.kwargs.get(\"abs\", False)\n self.normalise = self.kwargs.get(\"normalise\", True)\n self.default_plot_func = Callable\n self.disable_warnings = self.kwargs.get(\"disable_warnings\", False)\n self.display_progressbar = self.kwargs.get(\"display_progressbar\", False)\n self.normalise_func = self.kwargs.get(\"normalise_func\", normalise_by_negative)\n self.similarity_func = self.kwargs.get(\"similarity_func\", ssim)\n self.num_classes = self.kwargs.get(\"num_classes\", 1000)\n self.seed = self.kwargs.get(\"seed\", 42)\n self.last_results = []\n self.all_results = []\n\n # Asserts and warnings.\n if not self.disable_warnings:\n warn_func.warn_parameterisation(\n metric_name=self.__class__.__name__,\n sensitive_params=(\"similarity metric 'similarity_func'\"),\n citation=(\n \"Sixt, Leon, Granz, Maximilian, and Landgraf, Tim. 'When Explanations Lie: \"\n \"Why Many Modified BP Attributions Fail.' 
arXiv preprint, \"\n \"arXiv:1912.09818v6 (2020)\"\n ),\n )\n warn_func.warn_attributions(normalise=self.normalise, abs=self.abs)\n\n def __call__(\n self,\n model: ModelInterface,\n x_batch: np.array,\n y_batch: np.array,\n a_batch: Union[np.array, None],\n *args,\n **kwargs\n ) -> List[float]:\n \"\"\"\n This implementation represents the main logic of the metric and makes the class object callable.\n It completes batch-wise evaluation of some explanations (a_batch) with respect to some input data\n (x_batch), some output labels (y_batch) and a torch model (model).\n\n Parameters\n model: a torch model e.g., torchvision.models that is subject to explanation\n x_batch: a np.ndarray which contains the input data that are explained\n y_batch: a np.ndarray which contains the output labels that are explained\n a_batch: a Union[np.ndarray, None] which contains pre-computed attributions i.e., explanations\n args: Arguments (optional)\n kwargs: Keyword arguments (optional)\n channel_first (boolean): Indicates of the image dimensions are channel first, or channel last.\n Inferred from the input shape by default.\n explain_func (callable): Callable generating attributions, default=Callable.\n\n Returns\n last_results: a list of float(s) with the evaluation outcome of concerned batch\n\n Examples\n # Enable GPU.\n >> device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Load a pre-trained LeNet classification model (architecture at quantus/helpers/models).\n >> model = LeNet()\n >> model.load_state_dict(torch.load(\"tutorials/assets/mnist\"))\n\n # Load MNIST datasets and make loaders.\n >> test_set = torchvision.datasets.MNIST(root='./sample_data', download=True)\n >> test_loader = torch.utils.data.DataLoader(test_set, batch_size=24)\n\n # Load a batch of inputs and outputs to use for XAI evaluation.\n >> x_batch, y_batch = iter(test_loader).next()\n >> x_batch, y_batch = x_batch.cpu().numpy(), y_batch.cpu().numpy()\n\n # Generate Saliency attributions of the test set batch of the test set.\n >> a_batch_saliency = Saliency(model).attribute(inputs=x_batch, target=y_batch, abs=True).sum(axis=1)\n >> a_batch_saliency = a_batch_saliency.cpu().numpy()\n\n # Initialise the metric and evaluate explanations by calling the metric instance.\n >> metric = RandomLogit(abs=True, normalise=False)\n >> scores = metric(model=model, x_batch=x_batch, y_batch=y_batch, a_batch=a_batch_saliency, **{}}\n \"\"\"\n # Reshape input batch to channel first order:\n if \"channel_first\" in kwargs and isinstance(kwargs[\"channel_first\"], bool):\n channel_first = kwargs.pop(\"channel_first\")\n else:\n channel_first = utils.infer_channel_first(x_batch)\n x_batch_s = utils.make_channel_first(x_batch, channel_first)\n\n # Wrap the model into an interface\n if model:\n model = utils.get_wrapped_model(model, channel_first)\n\n # Update kwargs.\n self.kwargs = {\n **kwargs,\n **{k: v for k, v in self.__dict__.items() if k not in [\"args\", \"kwargs\"]},\n }\n if \"img_size\" in kwargs:\n warnings.warn(\n \"argument 'img_size' is deprecated and will be removed in future versions.\"\n )\n if \"nr_channels\" in kwargs:\n warnings.warn(\n \"argument 'nr_channels' is deprecated and will be removed in future versions.\"\n )\n\n self.last_results = []\n\n # Get explanation function and make asserts.\n explain_func = self.kwargs.get(\"explain_func\", Callable)\n asserts.assert_explain_func(explain_func=explain_func)\n\n if a_batch is None:\n # Generate explanations.\n a_batch = explain_func(\n 
model=model.get_model(),\n inputs=x_batch,\n targets=y_batch,\n **self.kwargs,\n )\n a_batch = utils.expand_attribution_channel(a_batch, x_batch_s)\n\n # Asserts.\n asserts.assert_attributions(x_batch=x_batch, a_batch=a_batch)\n\n # use tqdm progressbar if not disabled\n if not self.display_progressbar:\n iterator = enumerate(zip(x_batch_s, y_batch, a_batch))\n else:\n iterator = tqdm(\n enumerate(zip(x_batch_s, y_batch, a_batch)), total=len(x_batch_s)\n )\n\n for ix, (x, y, a) in iterator:\n\n if self.abs:\n a = np.abs(a)\n\n if self.normalise:\n a = self.normalise_func(a)\n\n # Randomly select off-class labels.\n random.seed(a=self.seed)\n y_off = np.array(\n [\n random.choice(\n [y_ for y_ in list(np.arange(0, self.num_classes)) if y_ != y]\n )\n ]\n )\n\n # Explain against a random class.\n a_perturbed = explain_func(\n model=model.get_model(),\n inputs=np.expand_dims(x, axis=0),\n targets=y_off,\n **self.kwargs,\n )\n\n if self.abs:\n a_perturbed = np.abs(a_perturbed)\n\n if self.normalise:\n a_perturbed = self.normalise_func(a_perturbed)\n\n self.last_results.append(\n self.similarity_func(a.flatten(), a_perturbed.flatten())\n )\n\n self.all_results.append(self.last_results)\n\n return self.last_results\n"
] |
[
[
"numpy.arange",
"numpy.expand_dims",
"numpy.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ddddwee1/SULT
|
[
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784",
"0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784"
] |
[
"example/FaceResNet/evaluation.py",
"SUL1/sample/feature_fusion/iterative/conv3d_model.py",
"SUL1/sample/conditional_gan/condgan.py",
"example/RepNet/train.py",
"example/FaceVGG/datareader.py",
"SUL_torch/example/HRNet/datareader.py",
"SUL_torch/example/tf2torch/torch/eval.py",
"example/RepNet/network.py",
"SUL_torch/example/torch2caffe/Layers.py",
"SUL_torch/example/HRNet/hrnet.py",
"example/HRNet/v2/train.py",
"example/HumanPoseDetector/train.py"
] |
[
"import tensorflow as tf \nimport model3 as M \nimport numpy as np \nimport resnet\nimport cv2\n\nclass FaceResNet(M.Model):\n\tdef initialize(self):\n\t\tself.resnet = resnet.ResNet([64,64,128,256,512], [3, 4, 14, 3], 512)\n\n\tdef forward(self, x):\n\t\tfeat = self.resnet(x)\n\t\treturn feat\n\ntf.keras.backend.set_learning_phase(True)\n\nmodel = FaceResNet()\noptimizer = tf.keras.optimizers.Adam(0.0001)\nsaver = M.Saver(model, optimizer)\nsaver.restore('./model/')\n\t\ndef extract_feature(imgname):\n\timg = cv2.imread(imgname)\n\timg = np.float32(img)[None,...]\n\tfeat = model(img).numpy()[0]\n\tfeat = feat.reshape([-1])\n\tfeat = feat / np.linalg.norm(feat)\n\treturn feat \n\nfeat = extract_feature('1.jpg')\n",
"import tensorflow as tf \nimport numpy as np \nimport model as M \n\nclass video_conv3d():\n\tdef __init__(self,classnum, accum=1,isTraining=True, model_path='./model/'):\n\t\tself.accumulation = accum\n\t\tself.classnum = classnum\n\t\tself.model_path = model_path\n\t\tself.global_step = 0\n\t\t# create input placeholder and label placeholder\n\t\tself.input_holder = tf.placeholder(tf.float32,[None,None,16,112,112,3])\n\t\tself.lab_holder = tf.placeholder(tf.float32,[None,classnum])\n\t\tself.dropout = tf.placeholder(tf.float32)\n\n\t\t# build model and classifier and optimizer\n\t\tfeat = tf.map_fn(self.model,self.input_holder)\n\t\tself.feat = self.feat_fusion(feat)\n\n\t\tself.build_classifier()\n\n\t\t# create session and saver\n\t\tself.sess = tf.Session()\n\t\tself.saver = tf.train.Saver()\n\t\tM.loadSess(self.model_path,self.sess,init=True)\n\n\tdef model(self,inp):\n\t\twith tf.variable_scope('conv3d_incep',reuse=tf.AUTO_REUSE):\n\t\t\tmod = M.Model(inp)\n\t\t\tself.blk_num = 0\n\t\t\tmod.conv3dLayer(3,64,activation=M.PARAM_LRELU) \n\t\t\tmod.maxpool3dLayer([1,2,2],stride=[1,2,2]) # 56\n\t\t\tmod.conv3dLayer(3,128,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 28\n\t\t\tmod.conv3dLayer(3,256,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,256,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 14\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 7\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.conv3dLayer(3,512,activation=M.PARAM_LRELU)\n\t\t\tmod.maxpool3dLayer(2) # 4\n\t\t\tprint(mod.get_current_layer())\n\t\t\tmod.flatten()\n\t\t\tmod.fcLayer(2048,activation=M.PARAM_LRELU)\n\t\t\tmod.dropout(self.dropout)\n\t\t\tmod.fcLayer(1024,activation=M.PARAM_LRELU)\n\t\t\t# mod.dropout(self.dropout)\n\n\t\treturn mod.get_current_layer()\n\n\tdef feat_fusion(self,feats):\n\t\twith tf.variable_scope('fusion'):\n\t\t\tmod = M.Model(feats)\n\t\t\tmod.dyn_route(3)\n\t\treturn mod.get_current_layer()\n\n\tdef build_classifier(self):\n\t\twith tf.variable_scope('classifier'):\n\t\t\tlogit_layer, eval_layer = M.enforcedClassifier(self.feat, self.lab_holder, dropout=1., multi=None)\n\t\t\t# logit_layer = eval_layer = self.feat\n\t\t\tself.accuracy = M.accuracy(eval_layer, tf.argmax(self.lab_holder,-1))\n\t\tself.eval_layer = eval_layer\n\t\twith tf.variable_scope('optimizer'):\n\t\t\tself.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit_layer, labels=self.lab_holder))\n\t\t\twith tf.control_dependencies(M.get_update_ops()):\n\t\t\t\ttrainer = M.Trainer(0.0001, self.loss)\n\t\t\t\tself.train_step = trainer.train()\n\t\t\t\tself.accum_step = trainer.accumulate()\n\n\tdef train(self, inp, lab, normalize=True):\n\t\tself.global_step += 1\n\t\tinp = np.float32(inp)\n\t\tlab = np.float32(lab)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\ttrain_step = self.train_step if self.global_step%self.accumulation==0 else self.accum_step\n\t\tls,acc, _ = self.sess.run([self.loss, self.accuracy, train_step], feed_dict = {self.input_holder:inp, self.lab_holder:lab,\\\n\t\t\t\t\t\t\t\t\tself.dropout:0.5})\n\t\treturn ls,acc \n\n\tdef eval(self, inp, lab, normalize=True):\n\t\tinp = np.float32(inp)\n\t\tlab = np.float32(lab)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tls,acc = self.sess.run([self.loss, self.accuracy], feed_dict = {self.input_holder:inp, self.lab_holder:lab, self.dropout:1.0})\n\t\treturn ls,acc \n\n\tdef get_score(self, inp, 
normalize=True):\n\t\tinp = np.float32(inp)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tscr = self.sess.run(self.eval_layer, feed_dict = {self.input_holder:inp, self.dropout:1.0})\n\t\treturn scr \n\n\tdef get_feature(self, inp, normalize=True):\n\t\tinp = np.float32(inp)\n\t\tif normalize:\n\t\t\tinp = inp / 127.5 - 1.\n\t\tfeat = self.sess.run(self.feat, feed_dict = {self.input_holder:inp, self.dropout:1.0})\n\t\treturn feat \n\n\tdef save(self, name):\n\t\tprint('Saving model to',self.model_path+name,'...')\n\t\tself.saver.save(self.sess, self.model_path+name)\n",
"import tensorflow as tf \nimport model as M \nimport numpy as np \nimport cv2\nfrom datetime import datetime\nimport random\n\nZDIM = 64\nIMGPIX = 128\nwith tf.name_scope('vecinp'):\n\tz = tf.placeholder(tf.float32,[None,ZDIM])\nwith tf.name_scope('img'):\n\timgholder = tf.placeholder(tf.float32,[None,128,128,1])\nwith tf.name_scope('classInp'):\n\tclassholder = tf.placeholder(tf.int64,[None])\n\nVARS = {}\nBSIZE = 32\nLR = 0.0002\nBETA=0.4\nCLASS = 1000\n\ndef gen(inp,shape,reuse=False):\n\twith tf.variable_scope('Generator',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.fcLayer(4*4*512)\n\t\tmod.construct([4,4,512])\n\t\tmod.deconvLayer(4,256,stride=2,activation=M.PARAM_RELU,batch_norm=True)#8\n\t\tmod.deconvLayer(4,128,stride=2,activation=M.PARAM_RELU,batch_norm=True)#16\n\t\tmod.deconvLayer(4,64,stride=2,activation=M.PARAM_RELU,batch_norm=True)#32\n\t\tmod.deconvLayer(4,32,stride=2,activation=M.PARAM_RELU,batch_norm=True)#64\n\t\tmod.deconvLayer(4,16,stride=2,activation=M.PARAM_RELU,batch_norm=True)#128\n\t\tmod.deconvLayer(4,1,stride=1,activation=M.PARAM_TANH,batch_norm=True)\n\t\tVARS['g'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Generator')\n\t\tprint(len(VARS['g']))\n\t\treturn mod.get_current_layer()\n\ndef dis(inp,shape,reuse=False):\n\twith tf.variable_scope('Discriminator',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.convLayer(5,16,stride=2,activation=M.PARAM_ELU,batch_norm=True)#64\n\t\tmod.convLayer(4,32,stride=2,activation=M.PARAM_ELU,batch_norm=True)#32\n\t\tmod.convLayer(4,64,stride=2,activation=M.PARAM_ELU,batch_norm=True)#16\n\t\tmod.convLayer(4,128,stride=2,activation=M.PARAM_ELU,batch_norm=True)#8\n\t\tmod.convLayer(4,256,stride=2,activation=M.PARAM_ELU,batch_norm=True)#4\n\t\tmod.flatten()\n\t\tmod.fcLayer(2)\n\t\tVARS['d'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Discriminator')\n\t\tprint(len(VARS['d']))\n\t\treturn mod.get_current_layer()\n\ndef classifier(inp,shape,reuse=False):\n\twith tf.variable_scope('Classifier',reuse=reuse):\n\t\tmod = M.Model(inp,shape)\n\t\tmod.convLayer(5,32,stride=2,activation=M.PARAM_ELU,batch_norm=True)#64\n\t\tmod.convLayer(4,64,stride=2,activation=M.PARAM_ELU,batch_norm=True)#32\n\t\tmod.convLayer(4,128,stride=2,activation=M.PARAM_ELU,batch_norm=True)#16\n\t\tmod.convLayer(4,256,stride=2,activation=M.PARAM_ELU,batch_norm=True)#8\n\t\tmod.convLayer(4,512,stride=2,activation=M.PARAM_ELU,batch_norm=True)#4\n\t\tmod.flatten()\n\t\tmod.fcLayer(ZDIM)\n\t\ta = mod.l2norm()\n\t\tmod.fcLayer(CLASS)\n\t\tVARS['c'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Classifier')\n\t\tprint(len(VARS['c']))\n\t\treturn mod.get_current_layer(),a[0]\n\ngenerated = gen(z,[None,ZDIM])\ndisfalse = dis(generated,[None,128,128,1])\ndistrue = dis(imgholder,[None,128,128,1],reuse=True)\nclassed,_ = classifier(imgholder,[None,128,128,1])\n_,fv = classifier(generated,[None,128,128,1],reuse=True)\n\n\nwith tf.name_scope('lossG'):\n\tlossG1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([BSIZE],dtype=tf.int64),logits=disfalse))\n\tlossG2 = tf.reduce_mean(tf.reduce_sum(tf.square(z-fv)))\n\tlossG3 = tf.reduce_mean((tf.square(imgholder - generated)))\n\ttf.summary.scalar('lossG1',lossG1)\n\ttf.summary.scalar('lossG2',lossG2)\n\ttf.summary.scalar('lossG3',lossG3)\n\tlossG = lossG1 + lossG2 + lossG3\n\t# lossG = lossG1\n\ttf.summary.scalar('lossG',lossG)\nwith tf.name_scope('lossD'):\n\tlossD1 = 
tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([BSIZE],dtype=tf.int64),logits=distrue))\n\tlossD2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.zeros([BSIZE],dtype=tf.int64),logits=disfalse))\n\tlossD = 0.5*(lossD1+lossD2)\n\ttf.summary.scalar('lossD',lossD)\nwith tf.name_scope('lossC'):\n\tlossC = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=classholder,logits=classed))\n\ttf.summary.scalar('lossC',lossC)\n\nwith tf.name_scope('opti'):\n\twith tf.name_scope('optiG'):\n\t\ttrainG = tf.train.RMSPropOptimizer(LR).minimize(lossG,var_list=VARS['g'])\n\twith tf.name_scope('optiD'):\n\t\ttrainD = tf.train.RMSPropOptimizer(LR).minimize(lossD,var_list=VARS['d'])\n\twith tf.name_scope('iptiC'):\n\t\ttrainC = tf.train.RMSPropOptimizer(LR).minimize(lossC,var_list=VARS['c'])\n\n# with tf.name_scope('opti'):\n# \twith tf.name_scope('optiG'):\n# \t\ttrainG = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossG,var_list=VARS['g'])\n# \twith tf.name_scope('optiD'):\n# \t\ttrainD = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossD,var_list=VARS['d'])\n# \twith tf.name_scope('iptiC'):\n# \t\ttrainC = tf.train.AdamOptimizer(learning_rate=LR,beta1=BETA).minimize(lossC,var_list=VARS['c'])\n\n# Use this block when generating imgs\n# noise = tf.placeholder(tf.float32,[None,ZDIM])\n# _,fv = classifier(imgholder,[None,128,128,1])\n# generated = gen(fv+noise,[None,ZDIM])\n\ndef getGeneratedImg(sess,it):\n\ta = np.random.uniform(size=[4,ZDIM],low=-1.0,high=1.0)\n\ta = a/np.linalg.norm(a,axis=1,keepdims=True)\n\timg = sess.run(generated,feed_dict={z:a})\n\timg = img+1\n\timg = img*127\n\timg = img.astype(np.uint8)\n\tfor i in range(4):\n\t\tcv2.imwrite('res/iter'+str(it)+'img'+str(i)+'.jpg',cv2.resize(img[i],(128,128)))\n\ndef getData():\n\tf = open('avclb2.txt')\n\tdt = []\n\tcounter = 0\n\tfor line in f:\n\t\tcounter+=1\n\t\tif (counter+1)%1000==0:\n\t\t\tprint(counter+1)\n\t\t\t# break\n\t\tl = line.replace('\\n','').split(' ')\n\t\timg = np.float32(cv2.resize(cv2.imread(l[0],0),(IMGPIX,IMGPIX))).reshape([128,128,1])\n\t\timg = img / 127.5\n\t\timg = img -1\n\t\tlb = int(l[1])\n\t\tdt.append((img,lb))\n\treturn dt\n\ndef training():\n\tmerged = tf.summary.merge_all()\n\tdata = getData()\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\twriter = tf.summary.FileWriter('./logs/',sess.graph)\n\t\tM.loadSess('./model/',sess=sess)\n\t\tcounter = M.counter\n\t\tfor i in range(1000000):\n\t\t\tcounter+=1\n\t\t\tsample = random.sample(data,BSIZE)\n\t\t\tx_train = [i[0] for i in sample]\n\t\t\ty_train = [i[1] for i in sample]\n\t\t\ta = np.random.uniform(size=[BSIZE,ZDIM],low=-1.0,high=1.0)\n\t\t\ta = a/np.linalg.norm(a,axis=1,keepdims=True)\n\t\t\t# ge = sess.run(generated,feed_dict={z:a})\n\t\t\tfor _ in range(5):\n\t\t\t\tsess.run(trainG,feed_dict={z:a,imgholder:x_train})\n\t\t\t_,_,mg,lsd,lsg,lsc = sess.run([trainC,trainD,merged,lossD,lossG,lossC],feed_dict={z:a,imgholder:x_train,classholder:y_train})\n\t\t\tif (i)%5 == 0:\n\t\t\t\twriter.add_summary(mg,counter)\n\t\t\t\tprint('iter:',i)\n\t\t\t\tprint('lsd:',lsd)\n\t\t\t\tprint('lsg:',lsg)\n\t\t\t\tprint('lsc:',lsc)\n\t\t\tif (i+1)%100==0:\n\t\t\t\tgetGeneratedImg(sess,i+1)\t\n\t\t\tif (i+1)%1000==0:\n\t\t\t\tsaver.save(sess,'./model/ModelCounter'+str(counter)+'.ckpt')\n\ndef getSample():\n\twith tf.Session() as sess:\n\t\tdata = getData()\n\t\tM.loadSess('./model/',sess=sess)\n\t\tfor i in range(20):\n\t\t\tx_train = 
random.sample(data,1)\n\t\t\t# print(x_train[0].shape)\n\t\t\tx_train = np.float32(x_train[0][0]).reshape([-1,128,128,1])\n\t\t\tfor j in range(8):\n\t\t\t\t# a = np.random.uniform(size=[1,ZDIM],low=-0.2,high=0.2)\n\t\t\t\ta = np.zeros([1,ZDIM],dtype=np.float32)\n\t\t\t\tgenimg = sess.run(generated,feed_dict={imgholder:x_train,noise:a})\n\t\t\t\tgenimg = (genimg+1)*127\n\t\t\t\tgenimg = genimg.astype(np.uint8)\n\t\t\t\tcv2.imwrite('./sampleimg/'+str(i)+'gen'+str(j)+'.jpg',cv2.resize(genimg[0],(128,128)))\n\t\t\t\tcv2.imwrite('./sampleimg/'+str(i)+'org.jpg',cv2.resize(((x_train[0]+1)*127).astype(np.uint8),(128,128)))\n\n# getSample()\ntraining()",
"import tensorflow as tf \nimport model3 as M \nimport datareader \nimport numpy as np \nimport tqdm \nimport network\n\ndef grad_loss(x, model):\n\tx2d, x3d = x\n\twith tf.GradientTape() as tape:\n\t\tpred, K, reprojected, crit_fake = model(x2d)\n\t\tcrit_real = model.crit(x3d)\n\n\t\tcrit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))\n\t\tcrit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))\n\n\t\trep_loss = tf.reduce_mean(tf.square(pred - x2d))\n\n\t\tKK = tf.matmul(K, K, transpose_b=True)\n\t\tK_trace = tf.expand_dims(tf.expand_dims(tf.trace(KK), -1), -1)\n\t\tK_loss = tf.reduce_mean(tf.abs(KK / K_trace - tf.eye(2))) \n\n\t\tloss_total_gen = crit_gen + rep_loss + K_loss\n\n\tgen_var = model.get_gen_vars()\n\tdis_var = model.dis.trainable_variables\n\tgrads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var])\n\treturn grads, [crit_dis, crit_gen, rep_loss, K_loss]\n\nreader = datareader.DataReader(16)\nmodel = network.RepNet()\noptim = tf.optimizers.Adam(0.0001, 0.5)\nsaver = M.Saver(model)\nsaver.restore('./model/')\n\nMAXITER = 10000\n\nbar = tqdm(range(MAXITER+1))\nfor i in bar:\n\tbatch = reader.get_next()\n\tgrads, lss = grad_loss(batch, model)\n\n\tgen_var = model.get_gen_vars()\n\tdis_var = model.dis.trainable_variables\n\toptim.apply_gradients(zip(grads[0], gen_var))\n\toptim.apply_gradients(zip(grads[1], dis_var))\n\n\tbar.set_description('CDis:%.4f CGen:%.4f Rep:%.4f K:%.4f'%(lss[0], lss[1], lss[2], lss[3]))\n\n\tif i%1000==0 and i>0:\n\t\tsaver.save('./model/repnet.ckpt')\n",
"import numpy as np \nimport cv2 \nimport random\nfrom multiprocessing.pool import ThreadPool\nimport time \n\ndef adjust_img(img):\n\t# a = np.random.randint(2)\n\t# if a==1:\n\t# \timg = np.flip(img, axis=1)\n\tif random.random()>0.5:\n\t\timg = np.flip(img, axis=1)\n\treturn img \n\n# def process(batch, eye):\n# \t# add more process here\n# \timgs, labels = list(zip(*batch))\n# \t# imgs = [cv2.resize(cv2.imread(i), (128,128)) for i in imgs]\n# \tt = time.time()\n# \timgs = [cv2.imread(i) for i in imgs]\n# \tt2 = time.time()\n# \tprint('DATA TIME', t2-t)\n# \timgs = [adjust_img(i) for i in imgs]\n# \tt3 = time.time()\n# \tprint('FLIP TIME', t3-t2)\n# \tlabels = eye[np.array(labels)]\n# \tbatch = [np.float32(imgs), np.float32(labels)]\n# \tt4 = time.time()\n# \tprint('CVT TIME', t4-t3)\n# \treturn batch\n\ndef process(sample):\n\tbatch, eye = sample\n\t# add more process here\n\timg, label = batch\n\t# imgs = [cv2.resize(cv2.imread(i), (128,128)) for i in imgs]\n\t# t = time.time()\n\timg = cv2.resize(cv2.imread(img), (128,128))\n\t# t2 = time.time()\n\t# print('DATA TIME', t2-t)\n\timg = adjust_img(img)\n\t# t3 = time.time()\n\t# print('FLIP TIME', t3-t2)\n\tlabel = eye[label]\n\t# t4 = time.time()\n\t# print('CVT TIME', t4-t3)\n\treturn img, label\n\nclass DataReader():\n\tdef __init__(self, listfile, bsize):\n\t\tf = open(listfile, 'r')\n\t\tself.data = []\n\t\tprint('Reading text file...')\n\t\tmax_label = 0\n\t\tfor line in f:\n\t\t\tline = line.strip().split('\\t')\n\t\t\timg = line[0]\n\t\t\tlabel = int(line[1])\n\t\t\tif label>max_label:\n\t\t\t\tmax_label = label\n\t\t\tself.data.append([img, label])\n\t\trandom.shuffle(self.data)\n\t\tprint(self.data[0])\n\t\tprint('Finished')\n\t\tself.pos = 0\n\t\tself.epoch = 0\n\t\tself.bsize = bsize\n\t\tself.max_label = label\n\t\tself.iter_per_epoch = len(self.data)//self.bsize\n\t\tself.pool = ThreadPool(processes=32)\n\t\tself.eye = np.eye(self.max_label+1)\n\t\tself.prefetch()\n\t\t\n\t\tprint('max_label:',max_label)\n\n\tdef prefetch(self):\n\t\tif self.pos + self.bsize > len(self.data):\n\t\t\tself.pos = 0\n\t\t\tself.epoch += 1\n\t\t\tprint(self.data[0])\n\t\t\trandom.shuffle(self.data)\n\n\t\tbatch = self.data[self.pos: self.pos+self.bsize]\n\t\targs = (batch, [self.eye]*len(batch))\n\t\targs = list(zip(*args))\n\t\tself.p = self.pool.map_async(process, args)\n\t\tself.pos += self.bsize\n\n\n\tdef get_next(self):\n\t\tbatch = self.p.get()\n\t\tbatch = list(zip(*batch))\n\t\tbatch = [np.float32(_) for _ in batch]\n\t\tself.prefetch()\n\t\treturn batch\n\nif __name__=='__main__':\n\tdata_reader = DataReader('imglist_iccv_clean.txt', 256*4)\n\tfor i in range(100):\n\t\tt1 = time.time()\n\t\tbatch = data_reader.get_next()\n\t\tt2 = time.time()\n\t\tprint(t2-t1)\n\tprint(batch[0].shape)\n\tprint(batch[1].shape)\n",
"import numpy as np \nimport pickle \nimport cv2 \nimport SUL.DataReader\n\ndef plot_gaussian(pos, size=64):\n\tx, y = pos[0], pos[1]\n\txx = np.linspace(0,size-1,size)\n\tyy = np.linspace(0,size-1,size)\n\txx, yy = np.meshgrid(xx, yy)\n\texp = np.exp( -((xx - x)**2 + (yy - y)**2 ) / 4 )\n\thmap = exp / exp.max()\n\treturn hmap\n\ndef get_hmap(pts, size=64, scale=4):\n\thmap = np.zeros([size, size, 17])\n\tif pts is None:\n\t\treturn hmap\n\tpts = pts.copy()\n\tpts[:,:2] /= scale\n\tfor i in range(len(pts)):\n\t\tif pts[i,2] > 0:\n\t\t\thmap[:,:,i] = plot_gaussian(pts[i])\n\treturn hmap\n\ndef get_minmax(pts):\n\txs = pts[:,0]\n\tys = pts[:,1]\n\tconf = pts[:,2]\n\tidx = np.where(conf>0)[0]\n\txs = xs[idx]\n\tys = ys[idx]\n\txmin, xmax = xs.min(), xs.max()\n\tymin, ymax = ys.min(), ys.max()\n\treturn xmin, xmax, ymin, ymax\n\ndef crop_norm(img, pts, augment=True):\n\t# TODO: add random scale and random shift while transforming \n\txmin, xmax, ymin, ymax = get_minmax(pts)\n\t\n\twh = max(ymax - ymin, xmax - xmin)\n\tscale = 256 / wh * (np.random.random() * 0.3 + 0.7)\n\n\timg = cv2.resize(img, None, fx=scale, fy=scale)\n\tpts[:,:2] = pts[:,:2] * scale\n\n\txmin, xmax, ymin, ymax = get_minmax(pts)\n\tcenter = [0.5 * (xmax + xmin), 0.5 * (ymin + ymax)]\n\txmin = center[0] - 128 \n\tif augment: xmin = xmin - np.random.random() * 80 + 40\n\tymin = center[1] - 128\n\tif augment: ymin = ymin - np.random.random() * 80 + 40\n\n\tH = np.float32([[1,0,-xmin], [0,1,-ymin]])\n\timg = cv2.warpAffine(img, H, (256,256))\n\tpts = pts - np.float32([xmin, ymin, 0])\n\treturn img, pts \n\ndef hmap_to_match(hmap):\n\t# TODO: choose certain index\n\tidx = [1,2,3,4,5,6,11,12,13,14,15,16]\n\tidx = np.int32(idx)\n\tresult = hmap[:,:,idx]\n\treturn result \n\ndef random_noise(hmap):\n\tnoise = np.random.random(hmap.shape) * 30 - 15\n\thmap += noise\n\treturn hmap\n\ndef random_mask(hmap, thresh=0.25):\n\tdomask = np.random.random()\n\tif domask<0.5:\n\t\tmask = np.random.random([12])\n\t\tmask[mask<thresh] = 0\n\t\tmask[mask>0] = 1 \n\t\thmap *= mask\n\ndef augment_pts(pts):\n\tpts = pts.copy()\n\trandom_shift = np.random.random(pts.shape) * 20 - 10\n\tpts[:,:2] += random_shift[:,:2]\n\treturn pts \n\ndef process(sample):\n\t# add more process here\n\timg, pts = sample\n\tis_centered = np.random.random()\n\timg = cv2.imread('./images/' + img)\n\tif is_centered<0.75:\n\t\timg, pts = crop_norm(img, pts)\n\t\thmap_match = hmap_to_match(get_hmap(augment_pts(pts))) * 256\n\t\trandom_mask(random_noise(hmap_match)) \n\t\thmap = get_hmap(pts) * 256\n\telse:\n\t\timg, pts = crop_norm(img, pts, augment=False)\n\t\thmap_match = hmap_to_match(get_hmap(None)) * 256\n\t\trandom_noise(hmap_match)\n\t\thmap = get_hmap(pts) * 256\n\treturn img, hmap_match, hmap\n\ndef post_process(inp):\n\tres = list(zip(*inp))\n\timgs = np.float32(res[0])\n\thmap_match = np.float32(res[1])\n\thmap = np.float32(res[2])\n\tres = [imgs, hmap_match, hmap]\n\tres = [np.transpose(i, (0,3,1,2)) for i in res]\n\treturn res \n\ndef get_data(listfile):\n\tprint('Reading pickle file...')\n\tdata = pickle.load(open(listfile,'rb'))\n\treturn data\n\ndef get_datareader(bsize, processes):\n\treader = SUL.DataReader.DataReader(bsize, processes=processes, gpus=1, sample_policy='RANDOM')\n\treader.set_data(get_data('mpii_3pts.pkl'))\n\treader.set_process_fn(process)\n\treader.set_post_process_fn(post_process)\n\treader.prefetch()\n\treturn reader\n\nif __name__=='__main__':\n\t# img = cv2.imread('./images/005808361.jpg')\n\t# data = get_data('mpii_3pts.pkl')\n\t# 
for i in data:\n\t# \tif i[0]=='005808361.jpg':\n\t# \t\tpts = i[1]\n\t# \t\tbreak \n\n\t# img, pts = crop_norm(img, pts)\n\t# hmap = get_hmap(pts)\n\n\t# import matplotlib.pyplot as plt \n\t\n\t# hmap = np.amax(hmap, axis=-1)\n\t# plt.figure()\n\t# plt.imshow(hmap, cmap='jet')\n\t# plt.figure()\n\t# plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\n\t# for i in range(len(pts)):\n\t# \tplt.plot(pts[i,0], pts[i,1], 'o')\n\t# plt.show()\n\treader = get_datareader(1, 1)\n\tbatch = reader.get_next()\n\tprint(batch[0].shape, batch[1].shape, batch[2].shape)\n\n\timport matplotlib.pyplot as plt \n\timg = batch[0][0]\n\timg = np.transpose(img, (1,2,0))\n\timg = np.uint8(img)\n\tplt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\t\n\n\thmapm = np.amax(batch[1][0], axis=0)\n\tplt.figure()\n\tplt.imshow(hmapm, cmap='jet')\n\tprint(hmapm.max(), hmapm.min())\n\n\thmap = np.amax(batch[2][0], axis=0)\n\tplt.figure()\n\tplt.imshow(hmap, cmap='jet')\n\tprint(hmap.max(), hmap.min())\n\tplt.show()\n",
"import torch\nimport numpy as np \nimport torch.nn as nn \nimport torch.nn.functional as F \nimport Model as M \nimport hrnet \n\nclass HRNET(M.Model):\n\tdef initialize(self, num_pts):\n\t\tself.backbone = hrnet.ResNet()\n\t\tself.lastconv = M.ConvLayer(1, num_pts)\n\tdef forward(self, x):\n\t\tx = self.backbone(x)\n\t\tx = self.lastconv(x)\n\t\treturn x \n\nnet = HRNET(17)\nnet.eval()\n\ndummy_inp = np.ones([1,3,256,256], dtype=np.float32)\ndummy_inp = torch.from_numpy(dummy_inp)\ny = net(dummy_inp)\n\nM.Saver(net).restore('./modeltorch/')\n\n## Do what the fuck you want.\n",
"import numpy as np \nimport tensorflow as tf \nimport model3 as M \n\nclass DenseBlock(M.Model):\n\tdef initialize(self):\n\t\tself.d1 = Dense(512, activation=M.PARAM_RELU)\n\t\tself.d2 = Dense(512, activation=M.PARAM_RELU)\n\tdef forward(self, x):\n\t\torign = x \n\t\tx = self.d1(x)\n\t\tx = self.d2(x)\n\t\tx = orign + x \n\t\treturn x \n\nclass GenNet(M.Model):\n\tdef initialize(self):\n\t\tself.d1 = Dense(512, activation=M.PARAM_RELU)\n\t\tself.d2 = DenseBlock()\n\t\tself.d3 = DenseBlock()\n\t\tself.d4 = Dense(17*3)\n\tdef forward(self, x):\n\t\tx = self.d1(x)\n\t\tx = self.d2(x)\n\t\tx = self.d3(x)\n\t\tx = self.d4(x)\n\t\treturn x \n\nclass CritNet(M.Model):\n\tdef initialize(self, C):\n\t\t# Chain matrix\n\t\tself.C = C # 17 x N\n\t\tself.bone_num = C.shape[-1]\n\t\tself.c1 = Dense(512, activation=M.PARAM_RELU)\n\t\tself.c2 = DenseBlock()\n\n\t\tself.d1 = Dense(512, activation=M.PARAM_RELU)\n\t\tself.d2 = DenseBlock()\n\t\tself.d3 = DenseBlock()\n\t\tself.d4 = Dense(1)\n\tdef forward(self, x):\n\t\t# The Kinematic branch\n\t\tc = x \n\t\tc = tf.reshape(c, [-1, 17, 3])\n\t\tc = tf.matmul(c, self.C, transpose_a=True)\n\t\tpsi = tf.matmul(c, c, transpose_a=True)\n\t\tpsi = tf.reshape(psi, [-1, self.bone_num*self.bone_num])\n\t\tpsi = self.c1(psi)\n\t\tpsi = self.c2(psi)\n\n\t\t# The normal branch\n\t\tx = self.d1(x)\n\t\tx = self.d2(x)\n\n\t\t# Final branch\n\t\tx = tf.concat([psi, x], axis=-1)\n\t\tx = self.d3(x)\n\t\tx = self.d4(x)\n\t\treturn x \n\nclass CamNet(M.Model):\n\tdef initialize(self):\n\t\tself.d1 = Dense(512, activation=M.PARAM_RELU)\n\t\tself.d2 = DenseBlock()\n\t\tself.d3 = DenseBlock()\n\t\tself.d4 = Dense(6)\n\tdef forward(self, x):\n\t\tx = self.d1(x)\n\t\tx = self.d2(x)\n\t\tx = self.d3(x)\n\t\tx = self.d4(x)\n\t\treturn x \n\nclass Reprojection(M.Model):\n\tdef forward(self, x, K):\n\t\tK = tf.reshape(K, [3, 2])\n\t\tx = tf.reshape(x, [-1, 17, 3])\n\t\tx = tf.matmul(x, K)\n\t\treturn x \n\nclass RepNet(M.Model):\n\tdef initialize(self):\n\t\tself.gen = GenNet()\n\t\tself.crit = CritNet()\n\t\tself.cam = CamNet()\n\t\tself.rep = Reprojection()\n\tdef forward(self, x):\n\t\tpred = self.gen(pred)\n\t\tK = self.cam()\n\t\tK = tf.reshape(K, [-1, 2, 3])\n\t\treprojected = self.rep(pred, K)\n\t\tcrit = self.crit(pred)\n\t\treturn pred, K, reprojected, crit\n\tdef predict(self, x):\n\t\tpred = self.gen(x)\n\t\treturn x \n\tdef get_gen_vars(self):\n\t\treturn self.gen.trainable_variables + self.cam.trainable_variables\n",
"import torch \nimport torch.nn as nn \nimport torch.nn.functional as F \nimport torch.nn.init as init \nfrom torch.nn.parameter import Parameter\nimport math \nimport numpy as np \n\nrecord_params = []\nparams_dict = {}\n\ndef init_caffe_input(x):\n\tglobal caffe_string, layer_counter\n\tif not 'caffe_string' in globals():\n\t\tcaffe_string = ''\n\tif not 'layer_counter' in globals():\n\t\tlayer_counter = 0\n\tcaffe_string += 'layer{\\n'\n\tcaffe_string += ' name: \"%s\"\\n'%x[1]\n\tcaffe_string += ' type: \"Input\"\\n'\n\tcaffe_string += ' top: \"%s\"\\n'%x[1]\n\tcaffe_string += ' input_param{\\n shape{\\n dim:%d\\n dim:%d\\n dim:%d\\n dim:%d\\n }\\n }\\n}\\n'%(x[0].shape[0], x[0].shape[1], x[0].shape[2], x[0].shape[3])\n\tlayer_counter += 1 \n\ndef compile_params_dict():\n\tglobal params_dict\n\tfor l in params_dict.keys():\n\t\tlayer = params_dict[l]\n\t\tfor k in layer.keys():\n\t\t\t# print(l, k, layer[k])\n\t\t\tlayer[k] = layer[k].cpu().detach().numpy()\n\ndef _resnet_normal(tensor):\n\tfan_in, fan_out = init._calculate_fan_in_and_fan_out(tensor)\n\tstd = math.sqrt(2.0 / float(fan_out))\n\treturn init._no_grad_normal_(tensor, 0., std)\n\nclass Model(nn.Module):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Model, self).__init__()\n\t\tself.is_built = False\n\t\tself.initialize(*args, **kwargs)\n\n\tdef initialize(self, *args, **kwargs):\n\t\tpass \n\n\tdef build(self, *inputs):\n\t\tpass \n\n\tdef __call__(self, *input, **kwargs):\n\t\tif not self.is_built:\n\t\t\tself.build(*input)\n\t\tfor hook in self._forward_pre_hooks.values():\n\t\t\tresult = hook(self, input)\n\t\t\tif result is not None:\n\t\t\t\tif not isinstance(result, tuple):\n\t\t\t\t\tresult = (result,)\n\t\t\t\tinput = result\n\t\tif torch._C._get_tracing_state():\n\t\t\tresult = self._slow_forward(*input, **kwargs)\n\t\telse:\n\t\t\tresult = self.forward(*input, **kwargs)\n\t\tfor hook in self._forward_hooks.values():\n\t\t\thook_result = hook(self, input, result)\n\t\t\tif hook_result is not None:\n\t\t\t\tresult = hook_result\n\t\tif len(self._backward_hooks) > 0:\n\t\t\tvar = result\n\t\t\twhile not isinstance(var, torch.Tensor):\n\t\t\t\tif isinstance(var, dict):\n\t\t\t\t\tvar = next((v for v in var.values() if isinstance(v, torch.Tensor)))\n\t\t\t\telse:\n\t\t\t\t\tvar = var[0]\n\t\t\tgrad_fn = var.grad_fn\n\t\t\tif grad_fn is not None:\n\t\t\t\tfor hook in self._backward_hooks.values():\n\t\t\t\t\twrapper = functools.partial(hook, self)\n\t\t\t\t\tfunctools.update_wrapper(wrapper, hook)\n\t\t\t\t\tgrad_fn.register_hook(wrapper)\n\t\tself.is_built = True\n\t\treturn result\n\n\tdef record(self):\n\t\tdef set_record_flag(obj):\n\t\t\tobj.record = True\n\t\tself.apply(set_record_flag)\n\nclass conv2D(Model):\n\tdef initialize(self, size, outchn, stride=1, pad='SAME_LEFT', dilation_rate=1, usebias=True, gropus=1):\n\t\tself.size = size\n\t\tself.outchn = outchn\n\t\tself.stride = stride\n\t\tself.usebias = usebias\n\t\tself.gropus = gropus\n\t\tself.dilation_rate = dilation_rate\n\t\tassert (pad in ['VALID','SAME_LEFT'])\n\t\tself.pad = pad \n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\t# parse args\n\t\tif isinstance(self.size,list):\n\t\t\t# self.size = [self.size[0],self.size[1],inchannel,self.outchn]\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = ((self.size[0]+ (self.dilation_rate-1) * ( self.size-1 ))//2, (self.size[1]+ (self.dilation_rate-1) * ( self.size-1 ))//2)\n\t\t\tself.size = [self.outchn, inchannel // self.gropus, 
self.size[0], self.size[1]]\n\t\telse:\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = (self.size + (self.dilation_rate-1) * ( self.size-1 ))//2\n\t\t\tself.size = [self.outchn, inchannel // self.gropus, self.size, self.size]\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0][0]\n\t\tself._parse_args(inp.shape)\n\t\tself.weight = Parameter(torch.Tensor(*self.size))\n\t\tif self.usebias:\n\t\t\tself.bias = Parameter(torch.Tensor(self.outchn))\n\t\telse:\n\t\t\tself.register_parameter('bias', None)\n\t\tself.reset_params()\n\n\tdef reset_params(self):\n\t\t_resnet_normal(self.weight)\n\t\tif self.bias is not None:\n\t\t\tfan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n\t\t\tbound = 1 / math.sqrt(fan_in)\n\t\t\tinit.uniform_(self.bias, -bound, bound)\n\n\tdef _write_caffe(self, name):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'conv%d'%layer_counter\n\n\t\tstride = self.stride\n\t\tpad = self.pad \n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Convolution\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%name\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' convolution_param{\\n'\n\t\tcaffe_string += ' num_output: %d\\n'%self.outchn\n\t\tcaffe_string += ' bias_term: %s\\n'%('true' if self.usebias else 'false')\n\t\tcaffe_string += ' group: 1\\n'\n\t\tcaffe_string += ' stride: %d\\n'%stride\n\t\tcaffe_string += ' pad_h: %d\\n'%pad\n\t\tcaffe_string += ' pad_w: %d\\n'%pad\n\t\tcaffe_string += ' kernel_h: %d\\n'%(self.size[2])\n\t\tcaffe_string += ' kernel_w: %d\\n'%(self.size[3])\n\t\tcaffe_string += ' }\\n}\\n'\n\n\t\tparams_dict[layer_name] = {}\n\t\tparams_dict[layer_name]['kernel'] = self.weight\n\t\tif self.usebias:\n\t\t\tparams_dict[layer_name]['bias'] = self.bias\n\n\t\tlayer_counter += 1 \n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\tres = F.conv2d(x[0], self.weight, self.bias, self.stride, self.pad, self.dilation_rate, self.gropus)\n\t\tlname = self._write_caffe(x[1])\n\t\treturn res, lname\n\nclass dwconv2D(Model):\n\t# depth-wise conv2d\n\tdef initialize(self, size, multiplier, stride=1, pad='SAME_LEFT', dilation_rate=1, usebias=True):\n\t\tself.size = size\n\t\tself.multiplier = multiplier\n\t\tself.stride = stride\n\t\tself.usebias = usebias\n\t\tself.dilation_rate = dilation_rate\n\t\tassert (pad in ['VALID','SAME_LEFT'])\n\t\tself.pad = pad \n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\tself.gropus = inchannel\n\t\t# parse args\n\t\tif isinstance(self.size,list):\n\t\t\t# self.size = [self.size[0],self.size[1],inchannel,self.outchn]\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = ((self.size[0]+ (self.dilation_rate-1) * ( self.size-1 ))//2, (self.size[1]+ (self.dilation_rate-1) * ( self.size-1 ))//2)\n\t\t\tself.size = [self.multiplier * inchannel, 1, self.size[0], self.size[1]]\n\t\telse:\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = (self.size + (self.dilation_rate-1) * ( self.size-1 ))//2\n\t\t\tself.size = [self.multiplier * inchannel, 1, self.size, self.size]\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0][0]\n\t\tself._parse_args(inp.shape)\n\t\tself.weight = Parameter(torch.Tensor(*self.size))\n\t\tif 
self.usebias:\n\t\t\tself.bias = Parameter(torch.Tensor(self.size[0]))\n\t\telse:\n\t\t\tself.register_parameter('bias', None)\n\t\tself.reset_params()\n\n\tdef reset_params(self):\n\t\t_resnet_normal(self.weight)\n\t\tif self.bias is not None:\n\t\t\tfan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n\t\t\tbound = 1 / math.sqrt(fan_in)\n\t\t\tinit.uniform_(self.bias, -bound, bound)\n\n\tdef _write_caffe(self, name):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'conv%d'%layer_counter\n\n\t\tstride = self.stride\n\t\tpad = self.pad\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Convolution\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%name\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' convolution_param{\\n'\n\t\tcaffe_string += ' num_output: %d\\n'%(self.multiplier * self.gropus)\n\t\tcaffe_string += ' bias_term: %s\\n'%('true' if self.usebias else 'false')\n\t\tcaffe_string += ' group: %d\\n'%self.gropus\n\t\tcaffe_string += ' stride: %d\\n'%stride\n\t\tcaffe_string += ' pad_h: %d\\n'%pad\n\t\tcaffe_string += ' pad_w: %d\\n'%pad\n\t\tcaffe_string += ' kernel_h: %d\\n'%(self.size[2])\n\t\tcaffe_string += ' kernel_w: %d\\n'%(self.size[3])\n\t\tcaffe_string += ' }\\n}\\n'\n\n\t\tparams_dict[layer_name] = {}\n\t\tparams_dict[layer_name]['dwkernel'] = self.weight\n\t\tif self.usebias:\n\t\t\tparams_dict[layer_name]['bias'] = self.bias\n\n\t\tlayer_counter += 1 \n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\tres = F.conv2d(x[0], self.weight, self.bias, self.stride, self.pad, self.dilation_rate, self.gropus)\n\t\tlname = self._write_caffe(x[1])\n\t\treturn res, lname\n\nclass conv1D(Model):\n\tdef initialize(self, size, outchn, stride=1, pad='SAME_LEFT', dilation_rate=1, usebias=True, gropus=1):\n\t\tself.size = size\n\t\tself.outchn = outchn\n\t\tself.stride = stride\n\t\tself.usebias = usebias\n\t\tself.gropus = gropus\n\t\tself.dilation_rate = dilation_rate\n\t\tassert (pad in ['VALID','SAME_LEFT'])\n\t\tself.pad = pad \n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\t# parse args\n\t\tif self.pad == 'VALID':\n\t\t\tself.pad = 0\n\t\telse:\n\t\t\tself.pad = (self.size + (self.dilation_rate-1) * ( self.size-1 ))//2\n\t\tself.size = [self.outchn, inchannel // self.gropus, self.size]\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0]\n\t\tself._parse_args(inp.shape)\n\t\tself.weight = Parameter(torch.Tensor(*self.size))\n\t\tif self.usebias:\n\t\t\tself.bias = Parameter(torch.Tensor(self.outchn))\n\t\telse:\n\t\t\tself.register_parameter('bias', None)\n\t\tself.reset_params()\n\n\tdef reset_params(self):\n\t\tinit.kaiming_uniform_(self.weight, a=math.sqrt(5))\n\t\tif self.bias is not None:\n\t\t\tfan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n\t\t\tbound = 1 / math.sqrt(fan_in)\n\t\t\tinit.uniform_(self.bias, -bound, bound)\n\n\tdef forward(self, x):\n\t\treturn F.conv1d(x, self.weight, self.bias, self.stride, self.pad, self.dilation_rate, self.gropus)\n\nclass conv3D(Model):\n\tdef initialize(self, size, outchn, stride=1, pad='SAME_LEFT', dilation_rate=1, usebias=True, gropus=1):\n\t\tself.size = size\n\t\tself.outchn = outchn\n\t\tself.stride = stride\n\t\tself.usebias = usebias\n\t\tself.gropus = gropus\n\t\tself.dilation_rate = dilation_rate\n\t\tassert (pad in 
['VALID','SAME_LEFT'])\n\t\tself.pad = pad \n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\t# parse args\n\t\tif isinstance(self.size,list) or isinstance(self.size, tuple):\n\t\t\t# self.size = [self.size[0],self.size[1],inchannel,self.outchn]\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = ((self.size[0]+ (self.dilation_rate-1) * ( self.size-1 ))//2, (self.size[1]+ (self.dilation_rate-1) * ( self.size-1 ))//2, (self.size[2]+ (self.dilation_rate-1) * ( self.size-1 ))//2)\n\t\t\tself.size = [self.outchn, inchannel // self.gropus, self.size[0], self.size[1], self.size[2]]\n\t\telse:\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = (self.size + (self.dilation_rate-1) * ( self.size-1 ))//2\n\t\t\tself.size = [self.outchn, inchannel // self.gropus, self.size, self.size, self.size]\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0]\n\t\tself._parse_args(inp.shape)\n\t\tself.weight = Parameter(torch.Tensor(*self.size))\n\t\tif self.usebias:\n\t\t\tself.bias = Parameter(torch.Tensor(self.outchn))\n\t\telse:\n\t\t\tself.register_parameter('bias', None)\n\t\tself.reset_params()\n\n\tdef reset_params(self):\n\t\tinit.kaiming_uniform_(self.weight, a=math.sqrt(5))\n\t\tif self.bias is not None:\n\t\t\tfan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n\t\t\tbound = 1 / math.sqrt(fan_in)\n\t\t\tinit.uniform_(self.bias, -bound, bound)\n\n\tdef forward(self, x):\n\t\treturn F.conv3d(x, self.weight, self.bias, self.stride, self.pad, self.dilation_rate, self.gropus)\n\nclass fclayer(Model):\n\tdef initialize(self, outsize, usebias=True, norm=False):\n\t\tself.outsize = outsize\n\t\tself.usebias = usebias\n\t\tself.norm = norm\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tself.insize = inputs[0][0].shape[1]\n\t\tself.weight = Parameter(torch.Tensor(self.outsize, self.insize))\n\t\tif self.usebias:\n\t\t\tself.bias = Parameter(torch.Tensor(self.outsize))\n\t\telse:\n\t\t\tself.register_parameter('bias', None)\n\t\tself.reset_params()\n\n\tdef reset_params(self):\n\t\t# init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n\t\t# init.normal_(self.weight, std=0.01)\n\t\t_resnet_normal(self.weight)\n\t\tprint('Reset fc params...')\n\t\tif self.bias is not None:\n\t\t\tfan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n\t\t\tbound = 1 / math.sqrt(fan_in)\n\t\t\tinit.uniform_(self.bias, -bound, bound)\n\n\tdef _write_caffe(self, name):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'fc%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"InnerProduct\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%name\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' inner_product_param{\\n'\n\t\tcaffe_string += ' num_output: %d\\n'%self.outsize\n\t\tcaffe_string += ' bias_term: %s\\n'%('true' if self.usebias else 'false')\n\t\tcaffe_string += ' }\\n}\\n'\n\n\t\tparams_dict[layer_name] = {}\n\t\tparams_dict[layer_name]['fckernel'] = self.weight\n\n\t\tif self.usebias:\n\t\t\tparams_dict[layer_name]['bias'] = self.bias\n\n\t\tlayer_counter += 1 \n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\tlname = self._write_caffe(x[1])\n\t\tx = x[0]\n\t\tif self.norm:\n\t\t\twith torch.no_grad():\n\t\t\t\tnorm = x.norm(p=2, 
dim=1, keepdim=True)\n\t\t\t\twnorm = self.weight.norm(p=2,dim=1, keepdim=True)\n\t\t\tx = x / norm\n\t\t\tweight = self.weight / wnorm\n\t\telse:\n\t\t\tweight = self.weight\n\t\treturn F.linear(x, weight, self.bias), lname\n\ndef flatten(x):\n\tx = x.view(x.size(0), -1)\n\treturn x \n\nclass Flatten(Model):\n\tdef _write_caffe(self, name):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'flatten%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Flatten\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%name\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\t# caffe_string += ' crop_param{\\n offset:%d\\n offset:%d\\n }\\n}\\n'%(1,1)\n\t\tcaffe_string += '}\\n'\n\n\t\tlayer_counter += 1 \n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\t# print(x)\n\t\tlname = self._write_caffe(x[1])\n\t\treturn flatten(x[0]), lname\n\nclass MaxPool2d(Model):\n\tdef initialize(self, size, stride=1, pad='SAME_LEFT', dilation_rate=1):\n\t\tself.size = size\n\t\tself.stride = stride\n\t\tself.pad = pad\n\t\tself.dilation_rate = dilation_rate\n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\t# parse args\n\t\tif isinstance(self.size,list) or isinstance(self.size, tuple):\n\t\t\t# self.size = [self.size[0],self.size[1],inchannel,self.outchn]\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = (self.size[0]//2, self.size[1]//2, self.size[2]//2)\n\t\telse:\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = self.size//2\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0]\n\t\tself._parse_args(inp.shape)\n\n\tdef forward(self, x):\n\t\treturn F.max_pool2d(x, self.size, self.stride, self.pad, self.dilation_rate, False, False)\n\nclass AvgPool2d(Model):\n\tdef initialize(self, size, stride=1, pad='SAME_LEFT'):\n\t\tself.size = size\n\t\tself.stride = stride\n\t\tself.pad = pad\n\n\tdef _parse_args(self, input_shape):\n\t\tinchannel = input_shape[1]\n\t\t# parse args\n\t\tif isinstance(self.size,list) or isinstance(self.size, tuple):\n\t\t\t# self.size = [self.size[0],self.size[1],inchannel,self.outchn]\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = (self.size[0]//2, self.size[1]//2, self.size[2]//2)\n\t\telse:\n\t\t\tif self.pad == 'VALID':\n\t\t\t\tself.pad = 0\n\t\t\telse:\n\t\t\t\tself.pad = self.size//2\n\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tinp = inputs[0]\n\t\tself._parse_args(inp.shape)\n\n\tdef forward(self, x):\n\t\treturn F.avg_pool2d(x, self.size, self.stride, self.pad, False, True)\n\nclass BatchNorm(Model):\n\t# _version = 2\n\t# __constants__ = ['track_running_stats', 'momentum', 'eps', 'weight', 'bias',\n\t# \t\t\t\t 'running_mean', 'running_var', 'num_batches_tracked',\n\t# \t\t\t\t 'num_features', 'affine', 'weight', 'bias']\n\n\tdef initialize(self, eps=2e-5, momentum=0.01, affine=True,\n\t\t\t\t track_running_stats=True):\n\t\tself.eps = eps\n\t\tself.momentum = momentum\n\t\tself.affine = affine\n\t\tself.track_running_stats = track_running_stats\n\t\t\n\tdef build(self, *inputs):\n\t\t# print('building...')\n\t\tnum_features = inputs[0][0].shape[1]\n\t\tif self.affine:\n\t\t\tself.weight = Parameter(torch.Tensor(num_features))\n\t\t\tself.bias = 
Parameter(torch.Tensor(num_features))\n\t\telse:\n\t\t\tself.register_parameter('weight', None)\n\t\t\tself.register_parameter('bias', None)\n\t\tif self.track_running_stats:\n\t\t\tself.register_buffer('running_mean', torch.zeros(num_features))\n\t\t\tself.register_buffer('running_var', torch.ones(num_features))\n\t\t\tself.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n\t\telse:\n\t\t\tself.register_parameter('running_mean', None)\n\t\t\tself.register_parameter('running_var', None)\n\t\t\tself.register_parameter('num_batches_tracked', None)\n\t\tself.reset_parameters()\n\n\tdef reset_running_stats(self):\n\t\tif self.track_running_stats:\n\t\t\tself.running_mean.zero_()\n\t\t\tself.running_var.fill_(1)\n\t\t\tself.num_batches_tracked.zero_()\n\n\tdef reset_parameters(self):\n\t\tself.reset_running_stats()\n\t\tif self.affine:\n\t\t\tinit.ones_(self.weight)\n\t\t\tinit.zeros_(self.bias)\n\n\tdef _write_caffe(self, btm):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\n\t\tlayer_name_bn = 'bn%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name_bn\n\t\tcaffe_string += ' type: \"BatchNorm\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%btm\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name_bn\n\t\tcaffe_string += ' batch_norm_param{\\n use_global_stats:true\\n eps:2e-5\\n }\\n'\n\t\tcaffe_string += '}\\n'\n\n\t\tparams_dict[layer_name_bn] = {}\n\t\tparams_dict[layer_name_bn]['mean'] = self.running_mean\n\t\tparams_dict[layer_name_bn]['var'] = self.running_var\n\t\tparams_dict[layer_name_bn]['scale'] = torch.from_numpy(np.float32([1.]))\n\n\t\tlayer_name_scale = 'scale%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name_scale\n\t\tcaffe_string += ' type: \"Scale\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%layer_name_bn\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name_bn\n\t\tcaffe_string += ' scale_param{\\n bias_term:true\\n }\\n'\n\t\tcaffe_string += '}\\n'\n\t\tparams_dict[layer_name_scale] = {}\n\t\tparams_dict[layer_name_scale]['scale'] = self.weight\n\t\t# print(layer_name, self.weight)\n\t\tparams_dict[layer_name_scale]['bias'] = self.bias\n\t\treturn layer_name_bn\n\n\tdef forward(self, input):\n\t\tlname = self._write_caffe(input[1])\n\t\tinput = input[0]\n\t\tif self.momentum is None:\n\t\t\texponential_average_factor = 0.0\n\t\telse:\n\t\t\texponential_average_factor = self.momentum\n\n\t\tif self.training and self.track_running_stats:\n\t\t\t# TODO: if statement only here to tell the jit to skip emitting this when it is None\n\t\t\tif self.num_batches_tracked is not None:\n\t\t\t\tself.num_batches_tracked += 1\n\t\t\t\tif self.momentum is None: # use cumulative moving average\n\t\t\t\t\texponential_average_factor = 1.0 / float(self.num_batches_tracked)\n\t\t\t\telse: # use exponential moving average\n\t\t\t\t\texponential_average_factor = self.momentum\n\n\t\tresult = F.batch_norm(\n\t\t\tinput, self.running_mean, self.running_var, self.weight, self.bias,\n\t\t\tself.training or not self.track_running_stats,\n\t\t\texponential_average_factor, self.eps)\n\t\treturn result, lname\n\ndef GlobalAvgPool2D(x):\n\tx = x.mean(dim=(2,3), keepdim=True)\n\treturn x \n\nclass GlobalAvgPool2DLayer(Model):\n\tdef _write_caffe(self, name, ksize):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in 
globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'gavgpool%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Pooling\"\\n'\n\t\tcaffe_string += ' bottom:\"%s\"\\n'%name\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' pooling_param{\\n pool:AVE\\n kernel_size:%d\\n }\\n'%ksize\n\t\tcaffe_string += '}\\n'\n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\tlname = self._write_caffe(x[1], x.shape[2])\n\t\treturn GlobalAvgPool2D(x[0]), lname\n\ndef activation(x, act, **kwargs):\n\tif act==-1:\n\t\treturn x\n\telif act==0:\n\t\treturn F.relu(x)\n\telif act==1:\n\t\treturn F.leaky_relu(x, negative_slope=0.2)\n\telif act==2:\n\t\treturn F.elu(x)\n\telif act==3:\n\t\treturn F.tanh(x)\n\telif act==6:\n\t\treturn torch.sigmoid(x)\n\nclass Activation(Model):\n\tdef initialize(self, act):\n\t\tself.param = act \n\t\tif act==9:\n\t\t\tself.act = torch.nn.PReLU(num_parameters=1)\n\n\tdef build(self, *inputs):\n\t\toutchn = inputs[0][0].shape[1]\n\t\tif self.param==8:\n\t\t\tself.act = torch.nn.PReLU(num_parameters=outchn)\n\n\tdef _write_caffe(self, btm):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'actv%d'%(layer_counter)\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tif self.param == 0:\n\t\t\tcaffe_string += ' type: \"ReLU\"\\n'\n\t\telif self.param in [1,8,9]:\n\t\t\tcaffe_string += ' type: \"PReLU\"\\n'\n\t\t\tparams_dict[layer_name] = {}\n\t\t\tif self.param==1:\n\t\t\t\tparams_dict[layer_name]['gamma'] = torch.from_numpy(np.float32([0.2]))\n\t\t\telse:\n\t\t\t\tparams_dict[layer_name]['gamma'] = list(self.parameters())[0]\n\t\telif self.param == 6:\n\t\t\tcaffe_string += ' type: \"Sigmoid\"\\n'\n\t\tcaffe_string += ' bottom: \"%s\"\\n'%btm\n\t\tcaffe_string += ' top: \"%s\"\\n'%btm\n\t\tcaffe_string += '}\\n'\n\n\t\tlayer_counter += 1 \n\t\treturn btm \n\n\tdef forward(self, x):\n\t\tif self.param == -1:\n\t\t\tlname = x[1]\n\t\telse:\n\t\t\tlname = self._write_caffe(x[1])\n\t\tx = x[0]\n\t\t# print(x.shape)\n\t\tif self.param==8 or self.param==9:\n\t\t\tres = self.act(x)\n\t\telse:\n\t\t\tres = activation(x, self.param)\n\t\treturn res, lname\n\nclass BroadcastMUL(Model):\n\tdef _write_caffe(self, names, tiles):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\n\t\t# manual tiling layers to match the size \n\t\tlayer_name = 'tile_0_%d'%layer_counter\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Tile\"\\n'\n\t\tcaffe_string += ' bottom:\"%s\"\\n'%names[0]\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' tile_param{\\n axis:2\\n tiles:%d\\n }\\n'%tiles[0]\n\t\tcaffe_string += '}\\n'\n\n\t\tlayer_name = 'tile_1_%d'%layer_counter\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Tile\"\\n'\n\t\tcaffe_string += ' bottom:\"tile_0_%d\"\\n'%layer_counter\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' tile_param{\\n axis:3\\n tiles:%d\\n }\\n'%tiles[1]\n\t\tcaffe_string += '}\\n'\n\n\t\t# do multiplication\n\t\tlayer_name = 
'mul%d'%layer_counter\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Eltwise\"\\n'\n\t\tcaffe_string += ' bottom:\"tile_1_%d\"\\n'%layer_counter\n\t\tcaffe_string += ' bottom:\"%s\"\\n'%names[1]\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' eltwise_param{\\n operation:PROD\\n }\\n'\n\t\tcaffe_string += '}\\n'\n\t\tlayer_counter += 1\n\t\treturn layer_name\n\n\tdef forward(self, x):\n\t\tnames = [i[1] for i in x]\n\t\txs = [i[0] for i in x]\n\t\tout = xs[0]*xs[1]\n\t\tlname = self._write_caffe(names, [xs[1].shape[2], xs[1].shape[3]])\n\t\treturn out, lname\n\nclass SUM(Model):\n\tdef _write_caffe(self, names):\n\t\tglobal caffe_string, layer_counter\n\t\tif not 'caffe_string' in globals():\n\t\t\tcaffe_string = ''\n\t\tif not 'layer_counter' in globals():\n\t\t\tlayer_counter = 0\n\t\tlayer_name = 'add%d'%layer_counter\n\n\t\tcaffe_string += 'layer{\\n'\n\t\tcaffe_string += ' name: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' type: \"Eltwise\"\\n'\n\t\tfor n in names:\n\t\t\tcaffe_string += ' bottom:\"%s\"\\n'%n\n\t\tcaffe_string += ' top: \"%s\"\\n'%layer_name\n\t\tcaffe_string += ' eltwise_param{\\n operation:SUM\\n }\\n'\n\t\tcaffe_string += '}\\n'\n\t\tlayer_counter += 1\n\t\treturn layer_name\n\n\tdef forward(self, *x):\n\t\tnames = [i[1] for i in x]\n\t\txs = [i[0] for i in x]\n\t\tlname = self._write_caffe(names)\n\t\treturn sum(xs), lname\n",
"import torch \nimport torch.nn as nn \nimport torch.nn.functional as F \nimport Model as M \nimport numpy as np \n\nclass ResUnit(M.Model):\n\tdef initialize(self, out, stride, shortcut=False):\n\t\tself.shortcut = shortcut\n\t\tself.c1 = M.ConvLayer(1, out//4, usebias=False, activation=M.PARAM_RELU, batch_norm=True)\n\t\tself.c2 = M.ConvLayer(3, out//4, usebias=False, activation=M.PARAM_RELU, pad='SAME_LEFT', stride=stride, batch_norm=True)\n\t\tself.c3 = M.ConvLayer(1, out, usebias=False, batch_norm=True)\n\t\tif shortcut:\n\t\t\tself.sc = M.ConvLayer(1, out, usebias=False, stride=stride, batch_norm=True)\n\n\tdef forward(self, x):\n\t\tbranch = self.c1(x)\n\t\tbranch = self.c2(branch)\n\t\tbranch = self.c3(branch)\n\t\tif self.shortcut:\n\t\t\tsc = self.sc(x)\n\t\telse:\n\t\t\tsc = x \n\t\tres = branch + sc\n\t\tres = M.activation(res, M.PARAM_RELU)\n\t\treturn res \n\nclass ResBlock(M.Model):\n\tdef initialize(self, out, stride, num_units):\n\t\tself.units = nn.ModuleList()\n\t\tfor i in range(num_units):\n\t\t\tself.units.append(ResUnit(out, stride if i==0 else 1, True if i==0 else False))\n\tdef forward(self, x):\n\t\tfor unit in self.units:\n\t\t\tx = unit(x)\n\t\treturn x \n\nclass BasicUnit(M.Model):\n\tdef initialize(self, out, stride, shortcut=False):\n\t\tself.shortcut = shortcut\n\t\tself.c1 = M.ConvLayer(3, out, pad='SAME_LEFT', usebias=False, activation=M.PARAM_RELU, batch_norm=True)\n\t\tself.c2 = M.ConvLayer(3, out, pad='SAME_LEFT', usebias=False, batch_norm=True)\n\t\tif shortcut:\n\t\t\tself.sc = M.ConvLayer(1, out, usebias=False, stride=stride, batch_norm=True)\n\n\tdef forward(self, x):\n\t\tbranch = self.c1(x)\n\t\tbranch = self.c2(branch)\n\t\tif self.shortcut:\n\t\t\tsc = self.sc(x)\n\t\telse:\n\t\t\tsc = x \n\t\tres = branch + sc\n\t\tres = M.activation(res, M.PARAM_RELU)\n\t\treturn res \n\nclass ResBasicBlock(M.Model):\n\tdef initialize(self, out, num_units):\n\t\tself.units = nn.ModuleList()\n\t\tfor i in range(num_units):\n\t\t\tself.units.append(BasicUnit(out, 1))\n\tdef forward(self, x):\n\t\tfor unit in self.units:\n\t\t\tx = unit(x)\n\t\treturn x \n\nclass Transition(M.Model):\n\tdef initialize(self, outchns, strides):\n\t\tself.trans = nn.ModuleList()\n\t\tfor i,(o,s) in enumerate(zip(outchns,strides)):\n\t\t\tif o is None or s is None:\n\t\t\t\tself.trans.append(None)\n\t\t\telif s==1:\n\t\t\t\tself.trans.append(M.ConvLayer(3,o, stride=s, pad='SAME_LEFT', activation=M.PARAM_RELU, usebias=False, batch_norm=True))\n\t\t\telse:\n\t\t\t\tself.trans.append(M.ConvLayer(3,o, stride=s, pad='SAME_LEFT', activation=M.PARAM_RELU, usebias=False, batch_norm=True))\n\n\tdef forward(self, x):\n\t\tresults = []\n\t\tfor i,t in enumerate(self.trans):\n\t\t\tif t is None:\n\t\t\t\tresults.append(x[i])\n\t\t\telse:\n\t\t\t\tresults.append(t(x[-1]))\n\t\treturn results\n\nclass FuseDown(M.Model):\n\tdef initialize(self, steps, inp, o):\n\t\tself.mods = nn.ModuleList()\n\t\tfor i in range(steps):\n\t\t\tif i==(steps-1):\n\t\t\t\tself.mods.append(M.ConvLayer(3, o, stride=2, pad='SAME_LEFT', batch_norm=True, usebias=False))\n\t\t\telse:\n\t\t\t\tself.mods.append(M.ConvLayer(3, inp, stride=2, pad='SAME_LEFT', activation=M.PARAM_RELU, batch_norm=True, usebias=False))\n\tdef forward(self, x):\n\t\tfor m in self.mods:\n\t\t\tx = m(x)\n\t\treturn x \n\nclass FuseUp(M.Model):\n\tdef initialize(self, o):\n\t\tself.c1 = M.ConvLayer(1, o, batch_norm=True, usebias=False)\n\tdef forward(self, x, target_shape):\n\t\tx = F.interpolate(x, size=target_shape, mode='nearest')\n\t\tx = 
self.c1(x)\n\t\treturn x \n\nclass Fuse(M.Model):\n\tdef initialize(self,outchns):\n\t\tbranches = nn.ModuleList()\n\t\tfor i in range(len(outchns)): # target\n\t\t\tbranch = nn.ModuleList()\n\t\t\tfor j in range(len(outchns)): # source\n\t\t\t\tif i==j:\n\t\t\t\t\tbranch.append(None)\n\t\t\t\telif i<j:\n\t\t\t\t\tbranch.append(FuseUp(outchns[i]))\n\t\t\t\telse:\n\t\t\t\t\tbranch.append(FuseDown(i-j, outchns[j], outchns[i]))\n\t\t\tbranches.append(branch)\n\t\tself.branches = branches\n\tdef forward(self, x):\n\t\tout = []\n\t\tfor i in range(len(self.branches)): # target\n\t\t\tbranch_out = []\n\t\t\tfor j in range(len(self.branches)): # source\n\t\t\t\tif i==j:\n\t\t\t\t\tbranch_out.append(x[i])\n\t\t\t\telif i<j:\n\t\t\t\t\tbranch_out.append(self.branches[i][j](x[j] , target_shape=x[i].shape[2:4]))\n\t\t\t\telse:\n\t\t\t\t\tbranch_out.append(self.branches[i][j](x[j]))\n\t\t\tbranch_out = sum(branch_out)\n\t\t\tout.append(M.activation(branch_out, M.PARAM_RELU))\n\t\treturn out \n\nclass FuseLast(M.Model):\n\tdef initialize(self, outchns):\n\t\tself.c1 = FuseUp(outchns[0])\n\t\tself.c2 = FuseUp(outchns[0])\n\t\tself.c3 = FuseUp(outchns[0])\n\t\tself.c_all = M.ConvLayer(3, outchns[0]*4, activation=M.PARAM_RELU, batch_norm=True, usebias=False)\n\tdef forward(self, x):\n\t\tout = [x[0]]\n\t\tout.append(self.c1(x[1], x[0].shape[2:4]))\n\t\tout.append(self.c2(x[2], x[0].shape[2:4]))\n\t\tout.append(self.c3(x[3], x[0].shape[2:4]))\n\t\tout = sum(out)\n\t\tout = M.activation(out, M.PARAM_RELU)\n\t\treturn out \n\nclass Stage(M.Model):\n\tdef initialize(self, outchns, strides, n, num_units, num_fuses, is_last_stage=False, d=False):\n\t\tself.d = d \n\t\tself.is_last_stage = is_last_stage\n\t\tself.num_fuses = num_fuses\n\t\tself.transition = Transition(outchns, strides)\n\t\tself.blocks = nn.ModuleList()\n\t\tself.fuses = nn.ModuleList()\n\t\tfor j in range(num_fuses):\n\t\t\tblock = nn.ModuleList()\n\t\t\tfor i in range(len(outchns)):\n\t\t\t\tblock.append(ResBasicBlock(outchns[i], num_units))\n\t\t\tself.blocks.append(block)\n\t\t\tif not (self.d and j==(self.num_fuses-1)):\n\t\t\t\tself.fuses.append(Fuse(outchns))\n\t\t\t\n\tdef forward(self, x ):\n\t\tx = self.transition(x)\n\t\tfor i in range(self.num_fuses):\n\t\t\tout = []\n\t\t\tfor o,b in zip(x, self.blocks[i]):\n\t\t\t\tout.append(b(o))\n\t\t\tif not (self.d and i==(self.num_fuses-1)):\n\t\t\t\tx = self.fuses[i](out)\n\t\t\telse:\n\t\t\t\tx = out \n\t\treturn x \n\nclass ResNet(M.Model):\n\tdef initialize(self):\n\t\tself.c1 = M.ConvLayer(3, 64, pad='SAME_LEFT', stride=2, activation=M.PARAM_RELU, usebias=False, batch_norm=True)\n\t\tself.c2 = M.ConvLayer(3, 64, pad='SAME_LEFT', stride=2, activation=M.PARAM_RELU, usebias=False, batch_norm=True)\n\t\tself.layer1 = ResBlock(256, 1, 4)\n\t\tself.stage1 = Stage([32, 64], [1, 2], 1, 4, 1)\n\t\tself.stage2 = Stage([32, 64, 128], [None, None, 2], 2, 4, 4)\n\t\tself.stage3 = Stage([32, 64, 128, 256], [None,None,None,2], 3, 4, 3, d=True)\n\t\tself.lastfuse = FuseLast([32,64,128,256])\n\n\tdef forward(self, x):\n\t\tx = self.c1(x)\n\t\tx = self.c2(x)\n\t\tx = self.layer1(x)\n\t\tx = self.stage1([x,x])\n\t\tx = self.stage2(x)\n\t\tx = self.stage3(x)\n\t\tx = self.lastfuse(x)\n\t\treturn x \n\nclass HRNET(M.Model):\n\tdef initialize(self, num_pts):\n\t\tself.backbone = ResNet()\n\t\tself.lastconv = M.ConvLayer(1, num_pts)\n\tdef forward(self, x):\n\t\tx = self.backbone(x)\n\t\tx = self.lastconv(x)\n\t\treturn x \n",
"import hrnet \nimport numpy as np \nimport tensorflow as tf \nimport model3 as M \nimport data_reader\nfrom tqdm import tqdm \n\ndef grad_loss(x, model):\n\tdata, label, mask = x\n\t# print(label.max())\n\t# print(label.min())\n\twith tf.GradientTape() as tape:\n\t\tout = model(data)\n\t\t# print(tf.reduce_max(out))\n\t\t# print(tf.reduce_mean(out))\n\t\tloss = tf.reduce_mean(tf.square(out - label))\n\t\t# loss = tf.reduce_mean(tf.square(out - label), axis=[2,3])\n\t\t# loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)\n\n\t\t# print(tf.reduce_max(out), tf.reduce_min(out))\n\tgrads = tape.gradient(loss, model.trainable_variables)\n\treturn grads, [loss]\n\nclass HRNET(M.Model):\n\tdef initialize(self, num_pts):\n\t\tself.backbone = hrnet.ResNet()\n\t\tself.lastconv = M.ConvLayer(1, num_pts, usebias=False)\n\tdef forward(self, x):\n\t\tfeat = self.backbone(x)\n\t\thmap = self.lastconv(feat)\n\t\treturn hmap\n\ntf.keras.backend.set_learning_phase(False)\nnet = HRNET(17)\nM.Saver(net.backbone).restore('./hrnet/')\n\noptim = tf.optimizers.Adam(0.0001, 0.5)\nsaver = M.Saver(net)\nsaver.restore('./model/')\n\n# initialize\n_ = np.zeros([1,256,256,3]).astype(np.float32)\nnet(_) \n\n# start training\nreader = data_reader.data_reader(16)\nMAX_ITER = 50000\n\nbar = tqdm(range(MAX_ITER+1))\nfor i in bar:\n\tbatch = reader.get_next()\n\tgrads, lss = grad_loss(batch, net)\n\toptim.apply_gradients(M.zip_grad(grads, net.trainable_variables))\n\tbar.set_description('Loss:%.4f'%lss[0])\n\tif i%2000==0 and i>0:\n\t\tsaver.save('./model/model.ckpt')\n",
"import network \nimport numpy as np \nimport tensorflow as tf \nimport model3 as M \nimport data_reader\n\ndef grad_loss(x, model):\n\tdata, label = x\n\twith tf.GradientTape() as tape:\n\t\tout = model(data)\n\t\tloss = tf.reduce_mean(tf.square(out - label))\n\t\tprint(tf.reduce_max(out), tf.reduce_min(out))\n\tgrads = tape.gradient(loss, model.trainable_variables)\n\treturn grads, [loss]\n\n\ntf.keras.backend.set_learning_phase(False)\nnet = network.PosePredNet(19)\nM.Saver(net.backbone).restore('./posedetnet/')\nM.Saver(net.head).restore('./posedetnet/')\n\noptim = tf.optimizers.Adam(0.0001)\nsaver = M.Saver(net)\nsaver.restore('./model/')\n\n# initialize\n_ = np.zeros([1,256,256,3]).astype(np.float32)\nnet(_) \n\n# start training\nreader = data_reader.data_reader(16)\nMAX_ITER = 100000\n\nfor i in range(MAX_ITER+1):\n\tbatch = reader.get_next()\n\tgrads, lss = grad_loss(batch, net)\n\toptim.apply_gradients(M.zip_grad(grads, net.trainable_variables))\n\tif i%10==0:\n\t\tprint('Iter:%d\\tLoss:%.4f'%(i, lss[0]))\n\tif i%100==0 and i>0:\n\t\tsaver.save('./model/model.ckpt')\n"
] |
[
[
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.keras.optimizers.Adam",
"numpy.linalg.norm",
"numpy.float32"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.placeholder",
"tensorflow.map_fn",
"tensorflow.variable_scope",
"numpy.float32",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax"
],
[
"tensorflow.summary.FileWriter",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.zeros",
"tensorflow.get_collection",
"tensorflow.placeholder",
"numpy.linalg.norm",
"tensorflow.ones",
"tensorflow.summary.merge_all",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.square",
"tensorflow.train.Saver",
"numpy.random.uniform",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.Session",
"tensorflow.summary.scalar",
"numpy.float32",
"numpy.zeros"
],
[
"tensorflow.matmul",
"tensorflow.ones_like",
"tensorflow.eye",
"tensorflow.zeros_like",
"tensorflow.GradientTape",
"tensorflow.square",
"tensorflow.optimizers.Adam",
"tensorflow.trace"
],
[
"numpy.eye",
"numpy.flip",
"numpy.float32"
],
[
"numpy.amax",
"matplotlib.pyplot.imshow",
"numpy.random.random",
"numpy.meshgrid",
"numpy.linspace",
"numpy.uint8",
"numpy.int32",
"numpy.float32",
"numpy.transpose",
"numpy.exp",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"torch.from_numpy",
"numpy.ones"
],
[
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.concat"
],
[
"torch.nn.functional.batch_norm",
"torch.nn.init.uniform_",
"torch.zeros",
"torch.no_grad",
"torch.nn.functional.tanh",
"torch.ones",
"torch.tensor",
"torch.nn.init.ones_",
"torch.nn.functional.relu",
"numpy.float32",
"torch.nn.functional.elu",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.functional.linear",
"torch.nn.functional.max_pool2d",
"torch.sigmoid",
"torch.nn.functional.conv3d",
"torch.nn.init._no_grad_normal_",
"torch.nn.PReLU",
"torch.nn.functional.conv2d",
"torch.nn.functional.conv1d",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.leaky_relu",
"torch.nn.init.zeros_",
"torch._C._get_tracing_state",
"torch.Tensor"
],
[
"torch.nn.ModuleList",
"torch.nn.functional.interpolate"
],
[
"tensorflow.square",
"tensorflow.optimizers.Adam",
"tensorflow.keras.backend.set_learning_phase",
"numpy.zeros",
"tensorflow.GradientTape"
],
[
"tensorflow.reduce_max",
"tensorflow.reduce_min",
"tensorflow.square",
"tensorflow.optimizers.Adam",
"tensorflow.keras.backend.set_learning_phase",
"numpy.zeros",
"tensorflow.GradientTape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
seqsense/CenterNet
|
[
"5cd5f3c1f42d8cfb5fc3157f8c1945b6787f11eb"
] |
[
"src/lib/datasets/sample/bbox_sample.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.utils.data as data\nimport numpy as np\nimport torch\nimport json\nimport cv2\nimport os\nfrom centernet_utils.image import flip, color_aug\nfrom centernet_utils.image import get_affine_transform, affine_transform\nfrom centernet_utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\nfrom centernet_utils.image import draw_dense_reg\nimport math\n\nclass BBoxTaskDataset(data.Dataset):\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n def __getitem__(self, index):\n data_dict = self.image_datas[index]\n file_name = data_dict['file_name']\n objects = data_dict['objects']\n img_path = os.path.join(self.image_dir, file_name)\n num_objs = min(len(objects), self.max_objs)\n\n img = cv2.imread(img_path)\n\n height, width = img.shape[0], img.shape[1]\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\n if self.opt.keep_res:\n input_h = (height | self.opt.pad) + 1\n input_w = (width | self.opt.pad) + 1\n s = np.array([input_w, input_h], dtype=np.float32)\n else:\n s = max(img.shape[0], img.shape[1]) * 1.0\n input_h, input_w = self.opt.input_h, self.opt.input_w\n \n flipped = False\n # if self.split == 'train':\n if not self.opt.not_rand_crop:\n s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))\n w_border = self._get_border(128, img.shape[1])\n h_border = self._get_border(128, img.shape[0])\n c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)\n c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)\n else:\n sf = self.opt.scale\n cf = self.opt.shift\n c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n \n if np.random.random() < self.opt.flip:\n flipped = True\n img = img[:, ::-1, :]\n c[0] = width - c[0] - 1\n \n\n trans_input = get_affine_transform(\n c, s, 0, [input_w, input_h])\n inp = cv2.warpAffine(img, trans_input, \n (input_w, input_h),\n flags=cv2.INTER_LINEAR)\n inp = (inp.astype(np.float32) / 255.)\n if not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n inp = (inp - self.mean) / self.std\n inp = inp.transpose(2, 0, 1)\n\n output_h = input_h // self.opt.down_ratio\n output_w = input_w // self.opt.down_ratio\n num_classes = self.num_classes\n trans_output = get_affine_transform(c, s, 0, [output_w, output_h])\n\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\n dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\n ind = np.zeros((self.max_objs), dtype=np.int64)\n reg_mask = np.zeros((self.max_objs), dtype=np.uint8)\n cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)\n cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)\n \n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \\\n draw_umich_gaussian\n\n gt_det = []\n for k in range(num_objs):\n obj = objects[k]\n bbox = obj['bbox']\n cls_id = obj['class_id']\n if flipped:\n bbox[[0, 2]] = width - bbox[[2, 0]] - 1\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 
0, output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n radius = self.opt.hm_gauss if self.opt.mse_loss else radius\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n draw_gaussian(hm[cls_id], ct_int, radius)\n wh[k] = 1. * w, 1. * h\n ind[k] = ct_int[1] * output_w + ct_int[0]\n reg[k] = ct - ct_int\n reg_mask[k] = 1\n cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]\n cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1\n if self.opt.dense_wh:\n draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)\n gt_det.append([ct[0] - w / 2, ct[1] - h / 2, \n ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])\n \n ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}\n if self.opt.dense_wh:\n hm_a = hm.max(axis=0, keepdims=True)\n dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)\n ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})\n del ret['wh']\n elif self.opt.cat_spec_wh:\n ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})\n del ret['wh']\n if self.opt.reg_offset:\n ret.update({'reg': reg})\n if self.opt.debug > 0:\n gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \\\n np.zeros((1, 6), dtype=np.float32)\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}\n ret['meta'] = meta\n return ret"
] |
[
[
"numpy.random.random",
"numpy.clip",
"numpy.arange",
"numpy.concatenate",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jozhang97/Side-tuning
|
[
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac",
"dea345691fb7ee0230150fe56ddd644efdffa6ac"
] |
[
"evkit/models/forward_inverse.py",
"evkit/utils/parallel.py",
"evkit/models/srl_architectures.py",
"tlkit/data/datasets/taskonomy_dataset.py"
] |
[
"from gym import spaces\nimport multiprocessing.dummy as mp\nimport multiprocessing\nimport numpy as np\nimport os\nimport torch\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter, ModuleList\nimport torch.nn.functional as F\n\nfrom evkit.rl.utils import init, init_normc_\nfrom evkit.utils.misc import is_cuda\nfrom evkit.preprocess import transforms\n\nimport pickle as pkl\n\ninit_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\n################################\n# Inverse Models\n# Predict s_{t+1} | s_t, a_t\n################################\nclass ForwardModel(nn.Module):\n \n def __init__(self, state_shape, action_shape, hidden_size):\n super().__init__()\n self.fc1 = init_(nn.Linear(state_shape + action_shape[1], hidden_size))\n self.fc2 = init_(nn.Linear(hidden_size, state_shape))\n \n def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n################################\n# Inverse Models\n# Predict a_t | s_t, s_{t+1}\n################################\nclass InverseModel(nn.Module):\n \n def __init__(self, input_size, hidden_size, output_size):\n super().__init__()\n self.fc1 = init_(nn.Linear(input_size * 2, hidden_size))\n # Note to stoip gradient\n self.fc2 = init_(nn.Linear(hidden_size, output_size))\n\n def forward(self, phi_t, phi_t_plus_1):\n x = torch.cat([phi_t, phi_t_plus_1], 1)\n x = F.relu(self.fc1(x))\n logits = self.fc2(x)\n return logits\n# ainvprobs = nn.softmax(logits, dim=-1)",
"import torch\nimport torch.nn as nn\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass _CustomDataParallel(nn.Module):\n def __init__(self, model, device_ids):\n super(_CustomDataParallel, self).__init__()\n self.model = nn.DataParallel(model, device_ids=device_ids)\n self.model.to(device)\n num_devices = torch.cuda.device_count() if device_ids is None else len(device_ids)\n print(f\"{type(model)} using {num_devices} GPUs!\")\n\n def forward(self, *input, **kwargs):\n return self.model(*input, **kwargs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model.module, name)",
"import torch.nn as nn\nfrom torch.nn import Parameter, ModuleList\nimport torch.nn.functional as F\nimport torch\nimport multiprocessing\nimport numpy as np\nimport os\nfrom gym import spaces\nfrom torchvision.models import resnet18\nfrom evkit.rl.utils import init, init_normc_\nfrom evkit.preprocess import transforms\nimport torchvision as vision\nfrom evkit.models.architectures import FrameStacked, Flatten, atari_conv\n\ninit_ = lambda m: init(m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n nn.init.calculate_gain('relu'))\n\nN_CHANNELS = 3\ndef getNChannels():\n return N_CHANNELS\n\n########################\n# SRL\n########################\n\n\nclass BaseModelSRL(nn.Module):\n \"\"\"\n Base Class for a SRL network\n It implements a getState method to retrieve a state from observations\n \"\"\"\n\n def __init__(self):\n super(BaseModelSRL, self).__init__()\n \n def getStates(self, observations):\n \"\"\"\n :param observations: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.forward(observations)\n\n def forward(self, x):\n raise NotImplementedError\n\n \n\n\n\nclass BaseModelAutoEncoder(BaseModelSRL):\n \"\"\"\n Base Class for a SRL network (autoencoder family)\n It implements a getState method to retrieve a state from observations\n \"\"\"\n def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512): \n super(BaseModelAutoEncoder, self).__init__()\n self.output_size = output_size\n self.n_frames = 4\n self.n_frames = n_frames\n self.output_size = output_size\n self.n_map_channels = n_map_channels\n self.use_target = use_target\n self.use_map = n_map_channels > 0\n\n if self.use_map:\n self.map_tower = nn.Sequential(\n atari_conv(self.n_frames * self.n_map_channels),\n nn.Conv2d(32, 64, kernel_size=4, stride=1), #, padding=3, bias=False),\n nn.ReLU(inplace=True),\n )\n\n if self.use_target:\n self.target_channels = 3\n else:\n self.target_channels = 0\n # Inspired by ResNet:\n # conv3x3 followed by BatchNorm2d\n self.encoder_conv = nn.Sequential(\n # 224x224xN_CHANNELS -> 112x112x64\n nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64\n\n conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64\n\n conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64\n )\n\n self.decoder_conv = nn.Sequential(\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 13x13x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 27x27x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 55x55x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 111x111x64\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, getNChannels(), kernel_size=4, stride=2), # 224x224xN_CHANNELS\n )\n self.encoder = FrameStacked(self.encoder_conv, self.n_frames)\n self.conv1 = nn.Conv2d(self.n_frames * (64 + self.target_channels), 64, 3, stride=1) # c4 s 4\n self.flatten = Flatten()\n self.fc1 = init_(nn.Linear(64 * 4 * 4 * (self.use_map) + 64 * 4 * 4 * (1), 1024))\n self.fc2 = init_(nn.Linear(1024, self.output_size))\n\n def getStates(self, observations):\n \"\"\"\n :param observations: 
(th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return self.encode(observations)\n\n def encode(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n# raise NotImplementedError\n self.encoder_conv(x)\n\n def decode(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n# raise NotImplementedError\n self.decoder_conv(x)\n\n def forward(self, x):\n \"\"\"\n :param x: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n x_taskonomy = x['taskonomy']\n if self.use_target:\n x_taskonomy = torch.cat([x_taskonomy, x[\"target\"]], dim=1)\n x_taskonomy = F.relu(self.conv1(x_taskonomy))\n if self.use_map:\n x_map = x['map']\n x_map = self.map_tower(x_map)\n x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)\n x = self.flatten(x_taskonomy)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return x\n encoded = self.encode(x) \n# decoded = self.decode(encoded).view(input_shape)\n return encoded #, decoded\n \n \n \ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"\"\n From PyTorch Resnet implementation\n 3x3 convolution with padding\n :param in_planes: (int)\n :param out_planes: (int)\n :param stride: (int)\n \"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\n\ndef srl_features_transform(task_path, dtype=np.float32):\n ''' rescale_centercrop_resize\n \n Args:\n output_size: A tuple CxWxH\n dtype: of the output (must be np, not torch)\n \n Returns:\n a function which returns takes 'env' and returns transform, output_size, dtype\n '''\n _rescale_thunk = transforms.rescale_centercrop_resize((3, 224, 224))\n if task_path != 'pixels_as_state':\n# net = TaskonomyEncoder().cuda()\n net = nn.Sequential(\n # 224x224xN_CHANNELS -> 112x112x64\n nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64\n\n conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64\n\n conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64\n ).cuda()\n\n net.eval()\n if task_path != 'None':\n checkpoint = torch.load(task_path)\n# checkpoint = {k.replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k}\n checkpoint = {k.replace('model.conv_layers.', '').replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k or 'conv_layers' in k}\n net.load_state_dict(checkpoint)\n\n def encode(x):\n if task_path == 'pixels_as_state':\n return x\n with torch.no_grad():\n return net(x)\n \n def _features_transform_thunk(obs_space):\n rescale, _ = _rescale_thunk(obs_space)\n def pipeline(x):\n# x = rescale(x).view(1, 3, 224, 224)\n x = torch.Tensor(x).cuda()\n x = encode(x)\n return x.cpu()\n if task_path == 'pixels_as_state':\n raise NotImplementedError\n return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)\n else:\n return pipeline, spaces.Box(-1, 1, (64, 6, 6), dtype)\n \n return _features_transform_thunk\n\n",
"from collections import namedtuple, Counter, defaultdict\nfrom tlkit.data.sequential_tasks_dataloaders import ConcatenatedDataLoader, CyclingDataLoader, ErrorPassingConcatenatedDataLoader, ErrorPassingCyclingDataLoader\nfrom tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS\nimport torch\nimport torch.utils.data as utils\nimport torchvision.transforms as transforms\nimport torchvision.datasets as ds\nimport torch.utils.data as data\nfrom tqdm import tqdm\nfrom PIL import Image, ImageFile\nimport numpy as np\nimport os\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\nimport warnings\n\nfrom tlkit.data.img_transforms import default_loader, get_transform\nfrom tlkit.data.splits import SPLIT_TO_NUM_IMAGES, taskonomy_no_midlevel as split_taskonomy_no_midlevel\n\nTRAIN_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['train']\nVAL_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['val']\nTEST_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['test']\n\n\nImageFile.LOAD_TRUNCATED_IMAGES = True # TODO Test this\n\n\nclass TaskonomyData(data.Dataset):\n '''\n Loads data for the Taskonomy dataset.\n This expects that the data is structured\n \n /path/to/data/\n rgb/\n modelk/\n point_i_view_j.png\n ... \n depth_euclidean/\n ... (other tasks)\n \n If one would like to use pretrained representations, then they can be added into the directory as:\n /path/to/data/\n rgb_encoding/\n modelk/\n point_i_view_j.npy\n ...\n \n Basically, any other folder name will work as long as it is named the same way.\n '''\n def __init__(self, data_path,\n tasks,\n buildings,\n transform=None,\n load_to_mem=False,\n zip_file_name=False,\n max_images=None):\n '''\n data_path: Path to data\n tasks: Which tasks to load. Any subfolder will work as long as data is named accordingly\n buildings: Which models to include. See `splits.taskonomy`\n transform: one transform per task.\n \n Note: This assumes that all images are present in all (used) subfolders\n '''\n self.return_tuple = True\n if isinstance(tasks, str):\n tasks = [tasks]\n transform = [transform] \n self.return_tuple = False\n \n self.buildings = buildings\n self.cached_data = {}\n self.data_path = data_path\n self.load_to_mem = load_to_mem\n self.tasks = tasks\n self.zip_file_name = zip_file_name\n\n self.urls = {task: make_dataset(os.path.join(data_path, task), buildings, max_images)\n for task in tasks}\n\n # Validate number of images\n n_images_task = [(len(obs), task) for task, obs in self.urls.items()]\n print(\"\\t\" + \" | \".join([\"{}: {}\".format(k, task) for task, k in n_images_task]))\n if max(n_images_task)[0] != min(n_images_task)[0]:\n print(\"Each task must have the same number of images. However, the max != min ({} != {}). 
Number of images per task is: \\n\\t{}\".format(\n max(n_images_task)[0], min(n_images_task)[0], \"\\n\\t\".join([str(t) for t in n_images_task])))\n\n # count number of frames per building per task\n all_buildings = defaultdict(dict)\n for task, obs in self.urls.items():\n c = Counter([url.split(\"/\")[-2] for url in obs])\n for building in c:\n all_buildings[building][task] = c[building]\n\n # find where the number of distinct counts is more than 1\n print('Removing data from the following buildings')\n buildings_to_remove = []\n for b, count in all_buildings.items():\n if len(set(list(count.values()))) > 1:\n print(f\"\\t{b}:\", count)\n buildings_to_remove.append(b)\n # [(len(obs), task) for task, obs in self.urls.items()]\n\n # redo the loading with fewer buildings\n buildings_redo = [b for b in buildings if b not in buildings_to_remove]\n self.urls = {task: make_dataset(os.path.join(data_path, task), buildings_redo)\n for task in tasks}\n n_images_task = [(len(obs), task) for task, obs in self.urls.items()]\n print(\"\\t\" + \" | \".join([\"{}: {}\".format(k, task) for task, k in n_images_task]))\n assert max(n_images_task)[0] == min(n_images_task)[0], \\\n \"Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \\n\\t{}\".format(\n max(n_images_task)[0], min(n_images_task)[0], \"\\n\\t\".join([str(t) for t in n_images_task]))\n self.size = max(n_images_task)[0]\n\n # Perhaps load some things into main memory\n if load_to_mem:\n print('Writing activations to memory')\n for t, task in zip(transform, tasks):\n self.cached_data[task] = [None] * len(self)\n for i, url in enumerate(self.urls[task]):\n self.cached_data[task][i] = t(default_loader(url))\n self.cached_data[task] = torch.stack(self.cached_data[task])\n# self.cached_data = torch.stack(self.cached_data)\n print('Finished writing some activations to memory')\n \n self.transform = transform\n\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, index):\n fpaths = [self.urls[task][index] for task in self.tasks]\n \n if self.load_to_mem:\n result = tuple([self.cached_data[task][index] for task in self.tasks])\n else:\n result = [default_loader(path) for path in fpaths]\n if self.transform is not None:\n # result = [transform(tensor) for transform, tensor in zip(self.transform, result)]\n result_post = []\n for i, (transform, tensor) in enumerate(zip(self.transform, result)):\n try:\n result_post.append(transform(tensor))\n except Exception as e:\n print(self.tasks[i], transform, tensor)\n raise e\n result = result_post\n\n # handle 2 channel outputs\n for i in range(len(self.tasks)):\n task = self.tasks[i]\n base_task = [t for t in SINGLE_IMAGE_TASKS if t in task]\n if len(base_task) == 0:\n continue\n else:\n base_task = base_task[0]\n num_channels = TASKS_TO_CHANNELS[base_task]\n if 'decoding' in task and result[i].shape[0] != num_channels:\n assert torch.sum(result[i][num_channels:,:,:]) < 1e-5, 'unused channels should be 0.'\n result[i] = result[i][:num_channels,:,:]\n\n if self.zip_file_name:\n result = tuple(zip(fpaths, result))\n\n if self.return_tuple:\n return result\n else:\n return result[0]\n\n \n\ndef make_dataset(dir, folders=None, max_images=None):\n # folders are building names. 
If None, get all the images (from both building folders and dir)\n has_reached_capacity = lambda images, max_images: not max_images is None and len(images) >= max_images\n images = []\n dir = os.path.expanduser(dir)\n if not os.path.isdir(dir):\n assert \"bad directory\"\n\n for subfolder in sorted(os.listdir(dir)):\n subfolder_path = os.path.join(dir, subfolder)\n if os.path.isdir(subfolder_path) and (folders is None or subfolder in folders):\n for fname in sorted(os.listdir(subfolder_path)):\n path = os.path.join(subfolder_path, fname)\n if not has_reached_capacity(images, max_images):\n images.append(path)\n\n # If folders/buildings are not specified, use images in dir\n if folders is None and os.path.isfile(subfolder_path) and not has_reached_capacity(images, max_images):\n images.append(subfolder_path)\n\n return images\n\n\ndef get_dataloaders(data_path,\n tasks,\n batch_size=64,\n batch_size_val=4,\n zip_file_name=False,\n train_folders=TRAIN_BUILDINGS,\n val_folders=VAL_BUILDINGS,\n test_folders=TEST_BUILDINGS,\n transform=None,\n num_workers=0,\n load_to_mem=False,\n pin_memory=False,\n max_images=None):\n \"\"\"\n :param data_path: directory that data is stored at\n :param tasks: names of subdirectories to return observations from\n :param batch_size:\n :param zip_file_name: when returning an observation, this will zip the fpath to it. E.g. (/path/to/img.png, OBS)\n :param train_folders: in a big data dir, which subfolders contain our training data\n :param val_folders: in a big data dir, which subfolders contain our val data\n :param max_images: maximum number of images in any dataset\n :return: dictionary of dataloaders\n \"\"\"\n\n if transform is None:\n if isinstance(tasks, str):\n transform = get_transform(tasks)\n else:\n transform = [get_transform(task) if len(task.split(' ')) == 1 else get_transform(*task.split(' ')) for task in tasks]\n tasks = [t.split(' ')[0] for t in tasks] # handle special data operations\n\n if isinstance(train_folders, str):\n train_folders = split_taskonomy_no_midlevel[train_folders]['train']\n if isinstance(val_folders, str):\n val_folders = split_taskonomy_no_midlevel[val_folders]['val']\n if isinstance(test_folders, str):\n test_folders = split_taskonomy_no_midlevel[test_folders]['test']\n\n\n dataloaders = {}\n print(f\"Taskonomy dataset TRAIN folders: {train_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=train_folders,\n transform=transform, zip_file_name=zip_file_name,\n load_to_mem=load_to_mem, max_images=max_images)\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['train'] = dataloader\n\n print(f\"Taskonomy dataset VAL folders: {val_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=val_folders,\n transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)\n\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['val'] = dataloader\n\n print(f\"Taskonomy dataset TEST folders: {test_folders}\")\n dataset = TaskonomyData(data_path, tasks, buildings=test_folders,\n transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)\n if len(dataset) == 0:\n print(f'\\tNO IMAGES FOUND for 
tasks {tasks} at path {data_path}')\n dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)\n dataloaders['test'] = dataloader\n return dataloaders\n\n\ndef get_lifelong_dataloaders(data_path,\n sources,\n targets,\n masks,\n epochs_per_task=5,\n epochs_until_cycle=0,\n split='fullplus',\n batch_size=64,\n batch_size_val=4,\n transform=None,\n num_workers=0,\n load_to_mem=False,\n pin_memory=False,\n speedup_no_rigidity=False,\n max_images_per_task=None):\n\n phases = ['train', 'val', 'test']\n dataloaders = {phase: [] for phase in phases}\n\n if isinstance(masks, bool):\n masks = [masks] * len(sources)\n\n masks = [['mask_valid'] if mask else [] for mask in masks]\n\n for i, (source, target, mask) in enumerate(zip(sources, targets, masks)):\n print(f'# Task {i} dataloader: {source} -> {target}')\n tasks = source + target + mask\n dl = get_dataloaders(\n data_path,\n tasks,\n batch_size=batch_size,\n batch_size_val=batch_size_val,\n train_folders=split,\n val_folders=split,\n test_folders=split,\n transform=transform,\n num_workers=num_workers,\n load_to_mem=load_to_mem,\n pin_memory=pin_memory,\n max_images=max_images_per_task,\n )\n for phase in phases:\n dataloaders[phase].append(dl[phase])\n\n if speedup_no_rigidity:\n # For methods that do not forget (no intransigence) by construction.\n # In validation, we only compute task performance for just-trained task and next-to-be-trained task\n epoch_lengths = [len(dl.dataset) for dl in dataloaders['val']]\n epoch_length = min(epoch_lengths) if min(epoch_lengths) == max(epoch_lengths) else None\n\n dl_just_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=1, start_dl=0,\n epoch_length_per_dl=epoch_length)\n dl_next_to_be_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=0, start_dl=0,\n epoch_length_per_dl=epoch_length)\n dataloaders['val'] = ErrorPassingConcatenatedDataLoader([dl_just_trained, dl_next_to_be_trained], zip_idx=False)\n else:\n dataloaders['val'] = ErrorPassingConcatenatedDataLoader(dataloaders['val'])\n\n train_epoch_length = SPLIT_TO_NUM_IMAGES[split] if split is not None else min([len(dl.dataset) for dl in dataloaders['train']])\n dataloaders['train'] = ErrorPassingCyclingDataLoader(dataloaders['train'], epoch_length_per_dl=epochs_per_task * train_epoch_length, epochs_until_cycle=epochs_until_cycle)\n dataloaders['test'] = ErrorPassingConcatenatedDataLoader(dataloaders['test'])\n return dataloaders\n\n\n\n\n"
] |
[
[
"torch.nn.init.constant_",
"torch.nn.init.calculate_gain",
"torch.nn.Linear",
"torch.cat"
],
[
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.cuda.is_available"
],
[
"torch.nn.init.calculate_gain",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.load",
"torch.Tensor",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.stack",
"torch.sum",
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Bullldoger/NLA--project
|
[
"05e7a39ca43b6eea7e74ad62ea7de445414e1a2b"
] |
[
"notebooks/models.py"
] |
[
"import numpy as np\nimport scipy as scp\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import svds\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\nclass Word2Vec(object):\n \n def __init__(self, sentences):\n \"\"\"\n sentences -- preprocessed sentences of reviews\n vocab -- vocabulary of a corpus words; {words: index}\n D -- word-context co-occurence matrix\n W -- matrix of words embeddings\n C -- matrix of contexts embeddings\n d -- dimension of words and reviews embeddings\n \"\"\"\n\n self.sentences = sentences\n self.vocab = None\n self.D = None\n self.W = None\n self.C = None\n self.d = 200\n self.Wt = None\n self.Ct = None\n \n ###################### DATA PROCESSING ######################\n \n ###### Create vocabulary from given sentences ######\n \n def create_vocabulary(self, r=200):\n \"\"\"\n r -- word occurence treshold\n \"\"\"\n \n self.vocab = dict()\n word_count = dict()\n idx = 0\n \n print('Creating vocabulary')\n for sentence in self.sentences:\n for word in sentence:\n if word not in word_count:\n word_count[word] = 1\n else:\n word_count[word] += 1\n\n for word, count in word_count.items():\n if word_count[word] >= r:\n self.vocab[word] = idx\n idx += 1\n \n \n \n ###### Create word-context co-occurence matrix ######\n \n def create_corpus_matrix(self, L=2):\n \"\"\"\n L -- size of the sliding window\n \"\"\"\n \n print('Creating corpus matrix')\n # initialization\n words_counts = Counter()\n for sentence_index, sentence in enumerate(self.sentences):\n for word_index, word in enumerate(sentence):\n if word in self.vocab:\n around_indexes = [i for i in range(max(word_index - L, 0), \n min(word_index + L + 1, len(sentence))) \n if i != word_index]\n for occur_word_index in around_indexes:\n occur_word = sentence[occur_word_index]\n if occur_word in self.vocab:\n skipgram = (word, occur_word)\n if skipgram in words_counts:\n words_counts[skipgram] += 1\n else:\n words_counts[skipgram] = 1\n rows = list()\n cols = list()\n values = list()\n\n\n for (word_1, word_2), sharp in words_counts.items(): \n rows.append(self.vocab[word_1])\n cols.append(self.vocab[word_2])\n values.append(sharp)\n\n self.D = scp.sparse.csr_matrix((values, (rows, cols)))\n \n \n ###################### AUXILARY FUNCTIONS ######################\n \n ###### Sigmoid ######\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n \n ##### Loss function #####\n def loss(self, k):\n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n loss = self.D.toarray() * np.log(self.sigmoid(self.X)) + (k * w_ * c_ / wc_) * np.log(self.sigmoid(-self.X))\n return np.sum(loss)\n \n ###### Gradient of the objective function ######\n def grad(self, k):\n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n gr = self.D.toarray() * self.sigmoid(-self.X) - (k * w_ * c_ / wc_) * self.sigmoid(self.X)\n return gr\n\n ###################### DIFFERENT METHODS FOR WORD EMBEDDINGS COMPUTATION ######################\n \n ###### Create matrix of words embeddings by IMF ###### \n def compute_embedds_IMF(self, k, alpha=.5):\n \"\"\"\n k -- negative sampling hyperparameter\n alpha -- hyperparameter for matrix representation\n \"\"\"\n print('Computing of words embeddings')\n all_observations = self.D.sum()\n\n rows = []\n cols = []\n sppmi_values = []\n\n sum_over_words = np.array(self.D.sum(axis=0)).flatten()\n sum_over_contexts = np.array(self.D.sum(axis=1)).flatten()\n\n for word_index_1, word_index_2 in 
zip(self.D.nonzero()[0], \n self.D.nonzero()[1]):\n sg_count = self.D[word_index_1, word_index_2]\n\n pwc = sg_count\n pw = sum_over_contexts[word_index_1]\n pc = sum_over_words[word_index_2]\n\n spmi_value = np.log2(pwc * all_observations / (pw * pc * k))\n sppmi_value = max(spmi_value, 0)\n\n rows.append(word_index_1)\n cols.append(word_index_2)\n sppmi_values.append(sppmi_value)\n\n sppmi_mat = scp.sparse.csr_matrix((sppmi_values, (rows, cols)))\n U, S, V = scp.sparse.linalg.svds(sppmi_mat, self.d)\n self.W = U @ np.diag(np.power(S, alpha))\n self.C = np.diag(np.power(S, alpha)) @ V\n self.X = self.W @ self.C\n \n # SGNS objective\n print(\"Value of the SGNS's objective: \", self.loss(k))\n \n ###### Create matrix of words embeddings by Riemannian optimization ######\n def compute_embedds_riem(self, k, step=5e-5, max_iter=20, alpha=.5):\n self.X = self.W @ self.C\n U, S, Vt = np.linalg.svd(self.X, full_matrices=False)\n U, S, Vt = U[:, :self.d], S[:self.d], Vt[:self.d, :]\n \n for i in range(max_iter):\n print(f\"Value of the SGNS's objective on the {i} iteration: \\n {self.loss(k)}\")\n grad_step = self.X + step * self.grad(k)\n U, S = np.linalg.qr(grad_step @ Vt.T)\n V, St = np.linalg.qr(grad_step.T @ U)\n self.X = U @ S @ V.T\n \n U_, S_, Vt_ = np.linalg.svd(self.X)\n U_, S_, Vt_ = U_[:, :self.d], S_[:self.d], Vt_[:self.d, :]\n self.W = U_ @ np.power(np.diag(S_), alpha)\n \n ###### Create matrix of words embeddings by EMF (AMEMF) ###### \n def compute_embedds_EMF(self, k, step=1e-3, max_iter=50, eps=1e-3, iters=20):\n \"\"\"\n k -- negative sampling hyperparameter\n \"\"\"\n # initialization\n m = len(self.vocab)\n self.Wt = np.random.rand(self.d, m)\n self.Ct = np.random.rand(self.d, m)\n Wt_prvs = np.zeros(self.Wt.shape)\n Ct_prvs = np.zeros(self.Ct.shape)\n \n wc_ = self.D.sum()\n w_ = np.array(self.D.sum(axis=1))\n c_ = np.array(self.D.sum(axis=0))\n Q = self.D.toarray() + k * w_ * c_ / wc_\n \n error = lambda M, M_prvs: np.linalg.norm(M - M_prvs)\n \n for i in range(max_iter):\n print(f'{i} iteration')\n # minimize over C\n \n for j in range(iters):\n Wt_prvs = self.Wt\n E = Q * self.sigmoid(self.Ct.T @ self.Wt)\n self.Wt = self.Wt - step * self.Ct @ (E - self.D.toarray())\n print(error(self.Wt, Wt_prvs))\n \n \"\"\"if error(self.Wt, Wt_prvs) <= eps:\n break\"\"\"\n print('First loop finished')\n \n # minimize over W\n \n for j in range(iters):\n Ct_prvs = self.Ct\n E = Q * self.sigmoid(self.Ct.T @ self.Wt)\n self.Ct = self.Ct - (step * (E - self.D.toarray()) @ self.Wt.T).T\n \n \"\"\"if error(self.Ct, Ct_prvs) > eps:\n break\"\"\"\n print('Second loop finished')\n \n ###### Get vector embedding for a given word ######\n def get_word_embedding(self, word):\n if word in self.vocab:\n idx = self.vocab[word]\n return self.W[idx, :]\n else:\n print('This word is not in the vocabulary')\n \n ###### Get vector embedding for a given word ######\n def get_word_embedding2(self, word):\n if word in self.vocab:\n idx = self.vocab[word]\n return self.Wt.T[idx, :]\n else:\n print('This word is not in the vocabulary')\n \n ###################### REVIEW EMBEDDINGS ######################\n \n ##### Compute review embeddings #####\n def get_review_embedding(self, review):\n \"\"\"\n review -- current review to be embedded\n \"\"\"\n\n review_vec = np.zeros(self.d)\n words_count = 0\n stops = set(stopwords.words(\"english\"))\n\n for word in review:\n if (word in self.vocab) and not (word in stops):\n review_vec += self.get_word_embedding(word)\n words_count += 1\n review_vec /= words_count\n 
return review_vec\n \n ##### Compute review embeddings #####\n def get_review_embedding2(self, review):\n \"\"\"\n review -- current review to be embedded\n \"\"\"\n\n review_vec = np.zeros(self.d)\n words_count = 0\n stops = set(stopwords.words(\"english\"))\n\n for word in review:\n if (word in self.vocab) and not (word in stops):\n review_vec += self.get_word_embedding2(word)\n words_count += 1\n review_vec /= words_count\n return review_vec\n \n ##### Create matrix 'embeddings-reviews' #####\n def get_features_matrix(self, reviews):\n \"\"\"\n reviews -- the whole collection of reviews\n \"\"\"\n X = np.zeros((len(reviews), self.d))\n for idx, review in enumerate(reviews):\n X[idx, :] = self.get_review_embedding(review)\n return X \n \n def get_features_matrix2(self, reviews):\n \"\"\"\n reviews -- the whole collection of reviews\n \"\"\"\n X = np.zeros((len(reviews), self.d))\n for idx, review in enumerate(reviews):\n X[idx, :] = self.get_review_embedding2(review)\n return X "
] |
[
[
"numpy.diag",
"numpy.linalg.svd",
"numpy.log2",
"numpy.power",
"numpy.linalg.norm",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.svds",
"numpy.random.rand",
"numpy.linalg.qr",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
pazeshun/jsk_apc
|
[
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde",
"0ff42000ad5992f8a31e719a5360a39cf4fa1fde"
] |
[
"demos/instance_occlsegm/instance_occlsegm_lib/datasets/apc/apc2016/mit.py",
"demos/selective_dualarm_stowing/node_scripts/alex_proba_estimation.py",
"demos/instance_occlsegm/examples/instance_occlsegm/instance_to_semantic/sample_roi_unpooling_2d.py",
"demos/instance_occlsegm/instance_occlsegm_lib/aug.py",
"demos/grasp_fusion/ros/grasp_fusion/node_scripts/mask_rcnn_instance_segmentation.py",
"demos/instance_occlsegm/instance_occlsegm_lib/contrib/instance_occlsegm/datasets/occlusion_segmentation.py"
] |
[
"import itertools\nimport os\nimport os.path as osp\n\nimport chainer\nimport numpy as np\nimport skimage.io\ntry:\n from sklearn.model_selection import train_test_split\nexcept ImportError:\n from sklearn.cross_validation import train_test_split\n\nfrom .base import class_names_apc2016\nimport instance_occlsegm_lib.data\nimport instance_occlsegm_lib.image\n\n\ndef ids_from_scene_dir(scene_dir, empty_scene_dir):\n for i_frame in itertools.count():\n empty_file = osp.join(\n empty_scene_dir, 'frame-{:06}.color.png'.format(i_frame))\n rgb_file = osp.join(\n scene_dir, 'frame-{:06}.color.png'.format(i_frame))\n segm_file = osp.join(\n scene_dir, 'segm/frame-{:06}.segm.png'.format(i_frame))\n if not (osp.exists(rgb_file) and osp.exists(segm_file)):\n break\n data_id = (empty_file, rgb_file, segm_file)\n yield data_id\n\n\ndef bin_id_from_scene_dir(scene_dir):\n caminfo = open(osp.join(scene_dir, 'cam.info.txt')).read()\n loc = caminfo.splitlines()[0].split(': ')[-1]\n if loc == 'shelf':\n bin_id = caminfo.splitlines()[1][-1]\n else:\n bin_id = 'tote'\n return bin_id\n\n\nclass MitAPC2016Dataset(chainer.dataset.DatasetMixin):\n\n class_names = class_names_apc2016\n datasets_dir = osp.expanduser('~/data/datasets/APC2016')\n\n def __init__(self, split, locations=('shelf', 'tote')):\n assert split in ['all', 'train', 'valid']\n self.split = split\n assert all(loc in ['shelf', 'tote'] for loc in locations)\n self._locations = locations\n self.dataset_dir = osp.join(self.datasets_dir, 'benchmark')\n if not osp.exists(self.dataset_dir):\n self.download()\n self._init_ids()\n\n def __len__(self):\n return len(self._ids[self.split])\n\n def _init_ids(self):\n data_ids = []\n # office\n contain_dir = osp.join(self.dataset_dir, 'office/test')\n for loc in self._locations:\n loc_dir = osp.join(contain_dir, loc)\n data_ids += self._get_ids_from_loc_dir('office', loc_dir)\n # warehouse\n contain_dir = osp.join(self.dataset_dir, 'warehouse')\n for sub in ['practice', 'competition']:\n sub_contain_dir = osp.join(contain_dir, sub)\n for loc in self._locations:\n loc_dir = osp.join(sub_contain_dir, loc)\n data_ids += self._get_ids_from_loc_dir('warehouse', loc_dir)\n ids_train, ids_valid = train_test_split(\n data_ids, test_size=0.25, random_state=5)\n self._ids = {'all': data_ids, 'train': ids_train, 'valid': ids_valid}\n\n def _get_ids_from_loc_dir(self, env, loc_dir):\n assert env in ('office', 'warehouse')\n loc = osp.basename(loc_dir)\n data_ids = []\n for scene_dir in os.listdir(loc_dir):\n scene_dir = osp.join(loc_dir, scene_dir)\n bin_id = bin_id_from_scene_dir(scene_dir)\n empty_dir = osp.join(\n self.dataset_dir, env, 'empty', loc, 'scene-{}'.format(bin_id))\n data_ids += list(ids_from_scene_dir(scene_dir, empty_dir))\n return data_ids\n\n def _load_from_id(self, data_id):\n empty_file, rgb_file, segm_file = data_id\n img = skimage.io.imread(rgb_file)\n img_empty = skimage.io.imread(empty_file)\n # Label value is multiplied by 9:\n # ex) 0: 0/6=0 (background), 54: 54/6=9 (dasani_bottle_water)\n lbl = skimage.io.imread(segm_file, as_gray=True) / 6\n lbl = lbl.astype(np.int32)\n # infer bin mask\n mask_fg = lbl > 0\n if mask_fg.sum() == 0:\n lbl[...] 
= -1\n else:\n y1, x1, y2, x2 = instance_occlsegm_lib.image.masks_to_bboxes(\n [mask_fg])[0]\n mask_bin = np.zeros_like(mask_fg)\n mask_bin[y1:y2, x1:x2] = True\n lbl[~mask_bin] = -1\n # copy object region in rgb image\n img_empty[mask_fg] = img[mask_fg]\n return img_empty, lbl\n\n def __getitem__(self, i):\n data_id = self._ids[self.split][i]\n img, lbl = self._load_from_id(data_id)\n return img, lbl\n\n def download(self):\n # XXX: this is optional\n # path = osp.join(self.datasets_dir, 'APC2016mit_training.zip')\n # fcn.data.cached_download(\n # url='https://drive.google.com/uc?id=0B4mCa-2YGnp7ZEMwcW5rcVBpeG8', # NOQA\n # path=path,\n # )\n instance_occlsegm_lib.data.download(\n url='https://drive.google.com/uc?id=0B9P1L--7Wd2vZHlSQjJSV0x4eXc',\n path=osp.join(self.datasets_dir, 'APC2016mit_benchmark.zip'),\n md5='bdb56b152a7cec0e652898338e519e79',\n postprocess=instance_occlsegm_lib.data.extractall,\n )\n",
"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport chainer.serializers as S\nfrom chainer import Variable\nimport numpy as np\n\nimport cv_bridge\nfrom jsk_recognition_msgs.msg import ClassificationResult\nfrom jsk_topic_tools import ConnectionBasedTransport\nfrom jsk_topic_tools.log_utils import logerr_throttle\nimport message_filters\nimport rospy\nfrom sensor_msgs.msg import Image\n\n\nclass AlexBatchNormalization(chainer.Chain):\n def __init__(self, n_class=1000):\n super(AlexBatchNormalization, self).__init__(\n conv1=L.Convolution2D(3, 96, 11, stride=4, pad=4),\n bn1=L.BatchNormalization(96),\n conv2=L.Convolution2D(96, 256, 5, stride=1, pad=1),\n bn2=L.BatchNormalization(256),\n conv3=L.Convolution2D(256, 384, 3, stride=1, pad=1),\n conv4=L.Convolution2D(384, 384, 3, stride=1, pad=1),\n conv5=L.Convolution2D(384, 256, 3, stride=1, pad=1),\n bn5=L.BatchNormalization(256),\n fc6=L.Linear(33280, 4096),\n fc7=L.Linear(4096, 4096),\n fc8=L.Linear(4096, n_class),\n )\n self.train = False\n\n def __call__(self, x, t=None):\n h = F.relu(self.bn1(self.conv1(x), test=not self.train))\n h = F.max_pooling_2d(h, 3, stride=2)\n h = F.relu(self.bn2(self.conv2(h), test=not self.train))\n h = F.max_pooling_2d(h, 3, stride=2)\n h = F.relu(self.conv3(h))\n h = F.relu(self.conv4(h))\n h = F.relu(self.bn5(self.conv5(h)))\n h = F.max_pooling_2d(h, 3, stride=3)\n h = F.dropout(F.relu(self.fc6(h)), train=self.train)\n h = F.dropout(F.relu(self.fc7(h)), train=self.train)\n h = self.fc8(h)\n self.proba = F.sigmoid(h)\n\n if t is None:\n assert not self.train\n return\n\n self.loss = F.softmax_cross_entropy(h, t)\n self.acc = F.accuracy(self.pred, t)\n if self.train:\n return self.loss\n\n\nclass AlexProbaEstimation(ConnectionBasedTransport):\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.gpu = rospy.get_param('~gpu', -1)\n model_h5 = rospy.get_param('~model_h5')\n self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.target_names = rospy.get_param('~target_names')\n self.model = AlexBatchNormalization(n_class=len(self.target_names))\n S.load_hdf5(model_h5, self.model)\n if self.gpu != -1:\n self.model.to_gpu(self.gpu)\n self.pub = self.advertise('~output', ClassificationResult,\n queue_size=1)\n self.pub_input = self.advertise(\n '~debug/net_input', Image, queue_size=1)\n\n def subscribe(self):\n # larger buff_size is necessary for taking time callback\n # http://stackoverflow.com/questions/26415699/ros-subscriber-not-up-to-date/29160379#29160379 # NOQA\n sub = message_filters.Subscriber(\n '~input', Image, queue_size=1, buff_size=2**24)\n sub_mask = message_filters.Subscriber(\n '~input/mask', Image, queue_size=1, buff_size=2**24)\n self.subs = [sub, sub_mask]\n queue_size = rospy.get_param('~queue_size', 10)\n if rospy.get_param('~approximate_sync', False):\n slop = rospy.get_param('~slop', 0.1)\n sync = message_filters.ApproximateTimeSynchronizer(\n self.subs, queue_size=queue_size, slop=slop)\n else:\n sync = message_filters.TimeSynchronizer(\n self.subs, queue_size=queue_size)\n sync.registerCallback(self._recognize)\n\n def unsubscribe(self):\n for sub in self.subs:\n sub.unregister()\n\n def _recognize(self, imgmsg, mask_msg=None):\n bridge = cv_bridge.CvBridge()\n bgr = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='bgr8')\n if mask_msg is not None:\n mask = 
bridge.imgmsg_to_cv2(mask_msg)\n if mask.shape != bgr.shape[:2]:\n logerr_throttle(10,\n 'Size of input image and mask is different')\n return\n elif mask.size == 0:\n logerr_throttle(10, 'Size of input mask is 0')\n return\n bgr[mask < 128] = self.mean_bgr\n input_msg = bridge.cv2_to_imgmsg(bgr.astype(np.uint8), encoding='bgr8')\n input_msg.header = imgmsg.header\n self.pub_input.publish(input_msg)\n\n blob = (bgr - self.mean_bgr).transpose((2, 0, 1))\n x_data = np.array([blob], dtype=np.float32)\n if self.gpu != -1:\n x_data = chainer.cuda.to_gpu(x_data, device=self.gpu)\n x = Variable(x_data, volatile=True)\n\n self.model.train = False\n self.model(x)\n\n proba = chainer.cuda.to_cpu(self.model.proba.data)[0]\n cls_msg = ClassificationResult(\n header=imgmsg.header,\n labels=None,\n label_names=None,\n label_proba=None,\n probabilities=proba,\n target_names=self.target_names)\n self.pub.publish(cls_msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('alex_proba_estimation')\n app = AlexProbaEstimation()\n rospy.spin()\n",
"#!/usr/bin/env python\n\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nimport instance_occlsegm_lib\nfrom instance_occlsegm_lib.contrib import instance_occlsegm\n\n\ndef get_data():\n dataset = instance_occlsegm_lib.datasets.apc.\\\n ARC2017InstanceSegmentationDataset(split='train')\n\n img, bboxes, labels, masks = dataset[0]\n fg_class_names = dataset.class_names\n class_names = tuple(['__background__'] + list(fg_class_names))\n n_fg_class = len(fg_class_names)\n\n n_instance = len(bboxes)\n mask_n_classes = []\n for i in range(n_instance):\n bbox = bboxes[i]\n label = labels[i]\n mask = masks[i]\n\n y1, x1, y2, x2 = bbox.astype(int)\n\n mask = mask[y1:y2, x1:x2]\n fg = mask.astype(bool)\n mask = mask.astype(np.float32)\n mask[fg] = np.random.uniform(0.75, 0.95, size=fg.sum())\n mask[~fg] = np.random.uniform(0.05, 0.25, size=(~fg).sum())\n mask = instance_occlsegm_lib.image.resize(mask, height=14, width=14)\n\n mask_n_class = np.zeros((n_fg_class, 14, 14))\n mask_n_class = mask_n_class.astype(np.float32)\n mask_n_class[label] = mask\n mask_n_classes.append(mask_n_class)\n mask_n_classes = np.asarray(mask_n_classes)\n\n return img, bboxes, labels, mask_n_classes, class_names\n\n\ndef main():\n out_dir = 'logs/sample_roi_unpooling_2d'\n try:\n os.makedirs(out_dir)\n except OSError:\n pass\n\n img, bboxes, labels, masks, class_names = get_data()\n\n x = masks\n outh, outw = img.shape[:2]\n rois = bboxes.astype(np.float32)\n roi_indices = np.zeros((len(rois), 1), dtype=np.float32)\n indices_and_rois = np.hstack((roi_indices, rois))\n\n y = instance_occlsegm.functions.roi_unpooling_2d(\n x,\n indices_and_rois,\n outb=1,\n outh=outh,\n outw=outw,\n spatial_scale=1,\n axes='yx',\n )\n y = y[0].array\n\n imgs = []\n for yi in y:\n # print(yi.min(), yi.max())\n imgs.append(instance_occlsegm_lib.image.colorize_heatmap(yi))\n viz = instance_occlsegm_lib.image.tile(imgs, boundary=True)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '001.jpg'), viz)\n\n proba = y\n max_proba = proba.max(axis=0)\n viz = instance_occlsegm_lib.image.colorize_depth(max_proba)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '002.jpg'), viz)\n bg = max_proba < 0.5\n lbl = np.argmax(proba, axis=0) + 1\n lbl[bg] = 0\n\n viz = instance_occlsegm_lib.image.label2rgb(\n lbl, img=img, label_names=class_names)\n instance_occlsegm_lib.io.imsave(osp.join(out_dir, '003.jpg'), viz)\n\n print('Write to:', out_dir)\n\n\nif __name__ == '__main__':\n main()\n",
"import warnings\n\nimport imgaug.augmenters as iaa\nimport imgaug.imgaug as ia\nfrom imgaug.parameters import Deterministic\nimport numpy as np\nimport skimage.measure\nimport skimage.transform\n\nimport instance_occlsegm_lib.image\n\n\ndef seg_dataset_to_object_data(seg_dataset, random=True, repeat=True,\n random_state=None, ignore_labels=None,\n one2one=True):\n if random and not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n while True:\n if random:\n indices = random_state.randint(\n 0, len(seg_dataset), len(seg_dataset))\n else:\n indices = np.arange(0, len(seg_dataset))\n\n for index in indices:\n img, lbl = seg_dataset[index]\n\n lbl += 1 # because regionprops ignores label 0\n regionprops = skimage.measure.regionprops(lbl)\n if random:\n random_state.shuffle(regionprops)\n for rp in regionprops:\n rp_label = rp.label - 1\n if ignore_labels and rp_label in ignore_labels:\n continue\n\n y1, x1, y2, x2 = rp.bbox\n img_ins = img[y1:y2, x1:x2]\n mask_ins = rp.filled_image\n\n yield {'label': rp_label, 'img': img_ins, 'mask': mask_ins}\n\n # an object from an image\n if one2one:\n # single object per single img, lbl pair\n # to avoid variation of object data\n break\n\n if not repeat:\n break\n\n\ndef augment_object_data(object_data, random_state=None, fit_output=True,\n aug_color=True, aug_geo=True, augmentations=None,\n random_order=False, scale=(0.5, 1.0)):\n try:\n iaa.Affine(fit_output=True)\n except TypeError:\n warnings.warn('Your imgaug does not support fit_output kwarg for'\n 'imgaug.augmenters.Affine. Please install via'\n '\\n\\n\\tpip install -e git+https://github.com/wkentaro/imgaug@affine_resize\\n\\n' # NOQA\n 'to enable it.')\n fit_output = False\n\n if random_state is None:\n random_state = np.random.RandomState()\n if augmentations is None:\n st = lambda x: iaa.Sometimes(0.3, x) # NOQA\n kwargs_affine = dict(\n order=1, # order=0 for mask\n cval=0,\n scale=scale,\n translate_px=(-16, 16),\n rotate=(-180, 180),\n shear=(-16, 16),\n mode='constant',\n )\n if fit_output:\n kwargs_affine['fit_output'] = fit_output\n augmentations = [\n st(iaa.WithChannels([0, 1], iaa.Multiply([1, 1.5]))\n if aug_color else iaa.Noop()),\n st(iaa.WithColorspace(\n 'HSV',\n children=iaa.WithChannels([1, 2], iaa.Multiply([0.5, 2])))\n if aug_color else iaa.Noop()),\n st(iaa.GaussianBlur(sigma=[0.0, 1.0])\n if aug_color else iaa.Noop()),\n iaa.Sometimes(0.8, iaa.Affine(**kwargs_affine)\n if aug_geo else iaa.Noop()),\n ]\n aug = iaa.Sequential(\n augmentations,\n random_order=random_order,\n random_state=ia.copy_random_state(random_state),\n )\n\n def activator_imgs(images, augmenter, parents, default):\n if isinstance(augmenter, iaa.Affine):\n augmenter.order = Deterministic(1)\n augmenter.cval = Deterministic(0)\n return True\n\n def activator_masks(images, augmenter, parents, default):\n white_lists = (\n iaa.Affine, iaa.PerspectiveTransform, iaa.Sequential, iaa.Sometimes\n )\n if not isinstance(augmenter, white_lists):\n return False\n if isinstance(augmenter, iaa.Affine):\n augmenter.order = Deterministic(0)\n augmenter.cval = Deterministic(0)\n return True\n\n def activator_lbls(images, augmenter, parents, default):\n white_lists = (\n iaa.Affine, iaa.PerspectiveTransform, iaa.Sequential, iaa.Sometimes\n )\n if not isinstance(augmenter, white_lists):\n return False\n if isinstance(augmenter, iaa.Affine):\n augmenter.order = Deterministic(0)\n augmenter.cval = Deterministic(-1)\n return True\n\n for objd in object_data:\n aug = 
aug.to_deterministic()\n objd['img'] = aug.augment_image(\n objd['img'], hooks=ia.HooksImages(activator=activator_imgs))\n if 'mask' in objd:\n objd['mask'] = aug.augment_image(\n objd['mask'], hooks=ia.HooksImages(activator=activator_masks))\n if 'lbl' in objd:\n objd['lbl'] = aug.augment_image(\n objd['lbl'], hooks=ia.HooksImages(activator=activator_lbls))\n if 'lbl_suc' in objd:\n objd['lbl_suc'] = aug.augment_image(\n objd['lbl_suc'],\n hooks=ia.HooksImages(activator=activator_lbls))\n if 'masks' in objd:\n masks = []\n for mask in objd['masks']:\n mask = aug.augment_image(\n mask,\n hooks=ia.HooksImages(activator=activator_masks),\n )\n masks.append(mask)\n masks = np.asarray(masks)\n objd['masks'] = masks\n del masks\n if 'lbls' in objd:\n lbls = []\n for lbl in objd['lbls']:\n lbl = aug.augment_image(\n lbl,\n hooks=ia.HooksImages(activator=activator_lbls),\n )\n lbls.append(lbl)\n lbls = np.asarray(lbls)\n objd['lbls'] = lbls\n del lbls\n yield objd\n\n\ndef stack_objects(img, lbl, object_data, region_label,\n random_state=None, stack_ratio=(0.2, 0.99),\n n_objects=(None, None),\n return_instances=False):\n # initialize outputs\n img, lbl = img.copy(), lbl.copy()\n lbl_suc = np.zeros(img.shape[:2], dtype=np.int32)\n lbl_suc.fill(-1)\n\n if random_state is None:\n random_state = np.random.RandomState()\n\n if isinstance(stack_ratio, tuple) and len(stack_ratio) == 2:\n stack_ratio = random_state.uniform(*stack_ratio)\n assert isinstance(stack_ratio, float)\n\n bboxes = []\n labels = []\n masks = []\n for i, objd in enumerate(object_data):\n img_h, img_w = img.shape[:2]\n ins_h, ins_w = objd['img'].shape[:2]\n\n mask_rg = lbl == region_label\n mask_labeled = lbl != -1\n\n # choose where to put\n Y_r, X_r = np.where(mask_rg)\n x_r = random_state.choice(X_r)\n y_r = random_state.choice(Y_r)\n x1 = max(x_r - ins_w / 2., 0)\n x2 = min(x1 + ins_w, img_w)\n y1 = max(y_r - ins_h / 2., 0)\n y2 = min(y1 + ins_h, img_h)\n x1, x2, y1, y2 = map(int, [x1, x2, y1, y2])\n ins_h = y2 - y1\n ins_w = x2 - x1\n\n img_ins = objd['img'][:ins_h, :ins_w]\n mask_ins = objd['mask'][:ins_h, :ins_w]\n if 'lbl_suc' in objd:\n lbl_suc_ins = objd['lbl_suc'][:ins_h, :ins_w]\n\n # put object on current objects\n mask_ins = mask_ins & mask_labeled[y1:y2, x1:x2]\n if mask_ins.sum() == 0:\n continue\n img[y1:y2, x1:x2][mask_ins] = img_ins[mask_ins]\n lbl[y1:y2, x1:x2][mask_ins] = objd['label']\n if 'lbl_suc' in objd:\n lbl_suc[y1:y2, x1:x2][mask_ins] = lbl_suc_ins[mask_ins]\n\n if return_instances:\n labels.append(objd['label'])\n mask = np.zeros((img_h, img_w), dtype=bool)\n mask[y1:y2, x1:x2][mask_ins] = True\n masks.append(mask)\n bbox = instance_occlsegm_lib.image.masks_to_bboxes([mask])[0]\n assert 0 <= bbox[0] <= img_h # y1\n assert 0 <= bbox[1] <= img_w # x1\n assert 0 <= bbox[2] <= img_h # y2\n assert 0 <= bbox[3] <= img_w # x2\n bboxes.append(bbox)\n\n mask_labeled = (lbl != -1)\n mask_objects = mask_labeled & (lbl != region_label)\n stack_ratio_t = 1. * mask_objects.sum() / mask_labeled.sum()\n if stack_ratio_t > stack_ratio:\n break\n\n if (all(isinstance(x, int) for x in n_objects) and\n not (n_objects[0] <= i <= n_objects[1])):\n break\n\n if return_instances:\n bboxes = np.asarray(bboxes, dtype=np.int32)\n labels = np.asarray(labels, dtype=np.int32)\n masks = np.asarray(masks, dtype=bool)\n\n return {\n 'img': img,\n 'lbl': lbl,\n 'lbl_suc': lbl_suc,\n 'bboxes': bboxes,\n 'labels': labels,\n 'masks': masks,\n }\n",
"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nfrom threading import Lock\n\nimport chainer\nimport numpy as np\n\ntry:\n import chainer_mask_rcnn\nexcept ImportError:\n print('''Please install chainer_mask_rcnn:\n\n sudo pip install chainer-mask-rcnn\n\n''', file=sys.stderr)\n sys.exit(1)\n\nimport cv_bridge\nfrom jsk_recognition_msgs.msg import ClusterPointIndices\nfrom jsk_recognition_msgs.msg import Label\nfrom jsk_recognition_msgs.msg import LabelArray\nfrom jsk_topic_tools import ConnectionBasedTransport\nfrom pcl_msgs.msg import PointIndices\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom std_srvs.srv import Empty\nfrom std_srvs.srv import EmptyResponse\n\n\nclass MaskRCNNInstanceSegmentation(ConnectionBasedTransport):\n\n def __init__(self):\n rospy.logwarn('This node is experimental, and its interface '\n 'can be changed in the future.')\n\n super(MaskRCNNInstanceSegmentation, self).__init__()\n # gpu\n self.gpu = rospy.get_param('~gpu', 0)\n if self.gpu >= 0:\n chainer.cuda.get_device_from_id(self.gpu).use()\n chainer.global_config.train = False\n chainer.global_config.enable_backprop = False\n\n self.fg_class_names = rospy.get_param('~fg_class_names')\n pretrained_model = rospy.get_param('~pretrained_model')\n\n n_fg_class = len(self.fg_class_names)\n self.model = chainer_mask_rcnn.models.MaskRCNNResNet(\n n_layers=50,\n n_fg_class=n_fg_class,\n pretrained_model=pretrained_model,\n )\n self.model.score_thresh = rospy.get_param('~score_thresh', 0.7)\n if self.gpu >= 0:\n self.model.to_gpu()\n\n self.lock = Lock()\n\n self.pub_indices = self.advertise(\n '~output/cluster_indices', ClusterPointIndices, queue_size=1)\n self.pub_labels = self.advertise(\n '~output/labels', LabelArray, queue_size=1)\n self.pub_lbl_cls = self.advertise(\n '~output/label_cls', Image, queue_size=1)\n self.pub_lbl_ins = self.advertise(\n '~output/label_ins', Image, queue_size=1)\n self.pub_viz = self.advertise(\n '~output/viz', Image, queue_size=1)\n\n self._bboxes = self._masks = self._labels = None\n self.srv_reset = rospy.Service('~reset', Empty, self.reset_callback)\n\n def subscribe(self):\n self.sub = rospy.Subscriber('~input', Image, self.callback,\n queue_size=1, buff_size=2**24)\n\n def unsubscribe(self):\n self.sub.unregister()\n\n def reset_callback(self, req):\n with self.lock:\n self._bboxes = None\n self._masks = None\n self._labels = None\n return EmptyResponse()\n\n def callback(self, imgmsg):\n bridge = cv_bridge.CvBridge()\n img = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')\n img_chw = img.transpose(2, 0, 1) # C, H, W\n\n bboxes, masks, labels, scores = self.model.predict([img_chw])\n bboxes = bboxes[0]\n masks = masks[0]\n labels = labels[0]\n # scores = scores[0]\n\n self.lock.acquire()\n if self._bboxes is not None:\n assert self._masks is not None\n assert self._labels is not None\n assert len(self._bboxes) == len(self._masks) == len(self._labels)\n\n reuse = [True] * len(self._bboxes)\n for i, mask_i in enumerate(self._masks):\n for mask_j in masks:\n iou = chainer_mask_rcnn.utils.get_mask_overlap(\n mask_i, mask_j\n )\n if iou > 0.3:\n reuse[i] = False\n break\n bboxes = np.r_[bboxes, self._bboxes[reuse]]\n masks = np.r_[masks, self._masks[reuse]]\n labels = np.r_[labels, self._labels[reuse]]\n\n self._bboxes = bboxes\n self._masks = masks\n self._labels = labels\n self.lock.release()\n\n msg_indices = ClusterPointIndices(header=imgmsg.header)\n msg_labels = LabelArray(header=imgmsg.header)\n # -1: label for background\n lbl_cls = - 
np.ones(img.shape[:2], dtype=np.int32)\n lbl_ins = - np.ones(img.shape[:2], dtype=np.int32)\n for ins_id, (mask, label) in enumerate(zip(masks, labels)):\n indices = np.where(mask.flatten())[0]\n indices_msg = PointIndices(header=imgmsg.header, indices=indices)\n msg_indices.cluster_indices.append(indices_msg)\n class_name = self.fg_class_names[label]\n msg_labels.labels.append(Label(id=label, name=class_name))\n lbl_cls[mask] = label\n lbl_ins[mask] = ins_id # instance_id\n self.pub_indices.publish(msg_indices)\n self.pub_labels.publish(msg_labels)\n\n msg_lbl_cls = bridge.cv2_to_imgmsg(lbl_cls)\n msg_lbl_ins = bridge.cv2_to_imgmsg(lbl_ins)\n msg_lbl_cls.header = msg_lbl_ins.header = imgmsg.header\n self.pub_lbl_cls.publish(msg_lbl_cls)\n self.pub_lbl_ins.publish(msg_lbl_ins)\n\n if self.pub_viz.get_num_connections() > 0:\n n_fg_class = len(self.fg_class_names)\n captions = ['{:d}: {:s}'.format(l, self.fg_class_names[l])\n for l in labels]\n viz = chainer_mask_rcnn.utils.draw_instance_bboxes(\n img, bboxes, labels + 1, n_class=n_fg_class + 1,\n masks=masks, captions=captions)\n msg_viz = bridge.cv2_to_imgmsg(viz, encoding='rgb8')\n msg_viz.header = imgmsg.header\n self.pub_viz.publish(msg_viz)\n\n\nif __name__ == '__main__':\n rospy.init_node('mask_rcnn_instance_segmentation')\n node = MaskRCNNInstanceSegmentation()\n rospy.spin()\n",
"import numpy as np\n\nfrom instance_occlsegm_lib.contrib import synthetic2d\n\n\nclass OcclusionSegmentationDataset(object):\n\n def __init__(self, split):\n assert split in ['train', 'test']\n data = synthetic2d.datasets.ARC2017OcclusionDataset(split)\n self._insta_data = data\n\n class_names = ['__background__'] + data.class_names.tolist()\n class_names = np.array(class_names)\n class_names.setflags(write=0)\n self.class_names = class_names\n\n def __len__(self):\n return len(self._insta_data)\n\n def __getitem__(self, i):\n img, bboxes, labels, masks = self._insta_data[i]\n\n labels += 1 # 0 represents background\n height, width = img.shape[:2]\n n_class = len(self.class_names)\n lbl_vis = np.zeros((height, width), dtype=np.int32)\n lbl_occ = np.zeros((height, width, n_class - 1), dtype=np.int32)\n for label, mask in zip(labels, masks):\n lbl_vis[mask == 1] = label\n lbl_occ[:, :, label - 1] = mask == 2\n\n return img, lbl_vis, lbl_occ\n\n\nif __name__ == '__main__':\n from .utils import view_occlusion_segmentation_dataset\n data = OcclusionSegmentationDataset('train')\n data.split = 'train'\n view_occlusion_segmentation_dataset(data)\n"
] |
[
[
"sklearn.cross_validation.train_test_split",
"numpy.zeros_like"
],
[
"numpy.array"
],
[
"numpy.asarray",
"numpy.hstack",
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.where"
],
[
"numpy.ones"
],
[
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
uchikun2493/nn_modules
|
[
"ad3486b842fc543561d39227de5daaa475d3513a"
] |
[
"samples/make_dataset.py"
] |
[
"import numpy as np\n\n# irisデータセットの読み込み\n# num_train: 学習データ数(残りはテストデータ)\n# random: ランダムに抽出するか\ndef load_iris(num_train=100, random=True):\n\n from sklearn.datasets import load_iris\n iris = load_iris()\n data = iris.data.astype(np.float32)\n label = iris.target.astype(np.int64)\n\n if random:\n perm = np.random.permutation(data.shape[0])\n a = perm[0:num_train]\n b = perm[num_train:]\n else:\n number = [i for i in range(len(data))]\n a = number[0:num_train]\n b = number[num_train:]\n\n train_data = data[a, :]\n train_teach = label[a]\n test_data = data[b, :]\n test_teach = label[b]\n\n return train_data, train_teach, test_data, test_teach\n\n"
] |
[
[
"numpy.random.permutation",
"sklearn.datasets.load_iris"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ishine/neurst
|
[
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa",
"2ba322393fcfed4261b33f4a657e12bbe321baaa"
] |
[
"examples/prune_tune/src/partial_trainer.py",
"tests/neurst/layers/decoders/transformer_decoder_test.py",
"examples/prune_tune/src/mask_sequence_generator.py",
"neurst/tasks/multilingual_translation.py",
"neurst/exps/validation.py"
] |
[
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport pickle\nfrom distutils.version import LooseVersion\n\nimport tensorflow as tf\nfrom absl import logging\n\nfrom examples.prune_tune.src.partial_tuning_optimizer import create_partial_tuning_optimizer\nfrom neurst.data.dataset_utils import map_data_for_keras\nfrom neurst.data.datasets.multiple_dataset import MultipleDataset\nfrom neurst.exps import register_exp\nfrom neurst.exps.trainer import Trainer\nfrom neurst.models.model_utils import summary_model_variables\nfrom neurst.optimizers.schedules import build_lr_schedule\nfrom neurst.sparsity.pruning_optimizer import create_pruning_optimizer\nfrom neurst.training import CustomCheckpointCallback, LearningRateScheduler, MetricReductionCallback, training_utils\nfrom neurst.training.gradaccum_keras_model import GradAccumKerasModel\nfrom neurst.utils import compat\nfrom neurst.utils.flags_core import Flag\n\n\n@register_exp(\"prune_tune_train\")\nclass PruneTuneTrainer(Trainer):\n \"\"\" Trainer for all tasks. \"\"\"\n\n def __init__(self, args, **kwargs):\n \"\"\" Initializes a util class for training neural models. \"\"\"\n super(PruneTuneTrainer, self).__init__(args, **kwargs)\n if args[\"mask_pkl\"]:\n with tf.io.gfile.GFile(args[\"mask_pkl\"], 'rb') as f:\n self.load_mask = pickle.load(f)\n else:\n self.mask_dir = os.path.join(self.model_dir, \"mask.pkl\")\n self.load_mask = None\n self._partial_tuning = args[\"partial_tuning\"]\n\n @staticmethod\n def class_or_method_args():\n this_args = super(PruneTuneTrainer, PruneTuneTrainer).class_or_method_args()\n this_args.extend(\n [Flag(\"partial_tuning\", dtype=Flag.TYPE.BOOLEAN, default=False,\n help=\"Train partial weights according to mask\"),\n Flag(\"mask_pkl\", dtype=Flag.TYPE.STRING, default=None,\n help=\"The file to the masks\")])\n return this_args\n\n def run(self):\n \"\"\" Training a neural model.\n\n Step 1: Create training model\n Step 2: Restore checkpoint/pretrain model/global_step if exists.\n Step 3: Fetch training data.\n Step 5: Fetch training training.\n Step 6: TRAIN!!!\n \"\"\"\n if self._hvd_backend == \"horovod\":\n import horovod.tensorflow.keras as hvd\n elif self._hvd_backend == \"byteps\":\n import byteps.tensorflow.keras as hvd\n\n tfds = training_utils.build_datasets(compat.ModeKeys.TRAIN, self.strategy,\n self.custom_dataset, self.task)\n if isinstance(self.custom_dataset, MultipleDataset):\n _tfds = None\n for _, ds in tfds.items():\n if _tfds is None:\n _tfds = ds\n else:\n _tfds = _tfds.concatenate(ds)\n tfds = _tfds\n tfds = tfds.prefetch(tf.data.experimental.AUTOTUNE)\n # Step 1: create a model\n with training_utils.get_strategy_scope(self.strategy):\n inps = self.task.create_inputs(compat.ModeKeys.TRAIN)\n formatted_inps = self.task.example_to_input(inps, compat.ModeKeys.TRAIN)\n model_out = self.model(formatted_inps, is_training=True)\n for metric_layer in self.task.build_metric_layer():\n model_out = metric_layer([formatted_inps, model_out])\n if 
(LooseVersion(tf.__version__) < LooseVersion(\"2.3\")\n or LooseVersion(tf.__version__) >= LooseVersion(\"2.5\")):\n logging.info(f\"Warning: Need further check on AccumgradKerasModel when TF version={tf.__version__}. \"\n f\"Here we ignore update_cycle={self._update_cycle}, \"\n f\"clip_value={self._clip_value}, clip_norm={self._clip_norm}.\")\n keras_model = tf.keras.Model(inps, model_out)\n elif compat.IS_PREV_TF_2_4_0:\n from neurst.training.gradaccum_keras_model import TF23GradAccumKerasModel\n keras_model = TF23GradAccumKerasModel(inps, model_out,\n update_cycle=self._update_cycle,\n clip_value=self._clip_value,\n clip_norm=self._clip_norm,\n freeze_variables=self._freeze_variables)\n else:\n keras_model = GradAccumKerasModel(inps, model_out,\n update_cycle=self._update_cycle,\n clip_value=self._clip_value,\n clip_norm=self._clip_norm,\n freeze_variables=self._freeze_variables)\n\n loss = self._criterion.reduce_loss(formatted_inps, model_out)\n if compat.is_tf_tensor(loss) or isinstance(loss, (list, tuple)):\n keras_model.add_loss(loss)\n elif isinstance(loss, dict):\n for _name, _loss in loss.items():\n keras_model.add_loss(_loss)\n keras_model.add_metric(_loss, name=_name + \"_mean\", aggregation=\"mean\")\n else:\n raise ValueError(\"criterion.reduce_loss returns \"\n \"unsupported value of type: {}\".format(type(loss)))\n self._restore_ckpt_or_pretrain()\n self._lr_schedule = build_lr_schedule(self._lr_schedule_args)\n if self._pruning_schedule is not None:\n self._optimizer = create_pruning_optimizer(self._optimizer, self.model, self._pruning_schedule,\n pruning_variable_pattern=self._pruning_variable_pattern,\n nopruning_variable_pattern=self._nopruning_variable_pattern,\n keep_prune_property=True)\n if self._partial_tuning is True:\n self._optimizer = create_partial_tuning_optimizer(self._optimizer, self.model, self.load_mask)\n self._optimizer = training_utils.handle_fp16_and_distributed_optimizer(\n self._optimizer, self._lr_schedule, self._hvd_backend)\n if self._hvd_backend is None:\n keras_model.compile(self._optimizer)\n else:\n # NOTE: we already add Horovod DistributedOptimizer in `_handle_fp16_and_distributed_optimizer`.\n # Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow\n # uses hvd.DistributedOptimizer() to compute gradients.\n keras_model.compile(self._optimizer, experimental_run_tf_function=False)\n keras_model.summary()\n summary_model_variables(self.model, self._freeze_variables)\n # initialize the checkpoint manager\n _ = compat.get_saver_or_default(self.model, self.model_dir, max_to_keep=self._checkpoints_max_to_keep)\n # build training training\n if not self._tb_log_dir:\n self._tb_log_dir = os.path.join(self.model_dir, \"train\")\n\n training_callbacks = [MetricReductionCallback(self.strategy, self._summary_steps, self._tb_log_dir,\n device=\"GPU:0\", lr_schedule=self._lr_schedule)]\n if self._hvd_backend is None or hvd.rank() == 0:\n training_callbacks.append(\n CustomCheckpointCallback(self.task.model_configs(self.model),\n save_checkpoint_steps=self._save_checkpoint_steps))\n if self._validator is not None:\n training_callbacks.append(self._validator.build(self.strategy, self.task, self.model))\n if self._hvd_backend is not None:\n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard or other metrics-based training.\n # NOTE!!! 
HERE we already integrate the metric averaging behaviour into the MetricReductionCallback.\n # training_callbacks.insert(0, hvd.callbacks.MetricAverageCallback(device=\"GPU:0\"))\n\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n training_callbacks.insert(0, hvd.callbacks.BroadcastGlobalVariablesCallback(0, device=\"GPU:0\"))\n if self._lr_schedule is not None:\n training_callbacks.append(LearningRateScheduler(self._lr_schedule))\n\n if self._experimental_count_batch_num:\n logging.info(\"Scanning the dataset......\")\n iterator = iter(training_utils.maybe_distribution_dataset(self.strategy, tfds))\n cnt = 0\n for _ in iterator:\n cnt += 1\n logging.info(f\"Total {cnt} batches per EPOCH.\")\n\n history = keras_model.fit(\n map_data_for_keras(tfds.repeat()),\n initial_epoch=0,\n epochs=1,\n steps_per_epoch=self._train_steps, # * args[\"update_cycle\"],\n verbose=2,\n callbacks=training_callbacks)\n logging.info(history.history)\n\n if self._pruning_schedule is not None:\n mask = [\n tf.Variable(\n (tf.cast(tf.math.not_equal(weight, 0.), weight.dtype.base_dtype)),\n dtype=weight.dtype.base_dtype,\n trainable=False,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) for weight in keras_model.trainable_weights\n ]\n # np.save(self.mask_dir, mask)\n with open(self.mask_dir, 'wb') as f:\n pickle.dump(mask, f)\n\n if self._partial_tuning is True:\n mask = self.load_mask\n # np.save(self.mask_dir, mask)\n saved_mask_dir = os.path.join(self.model_dir, \"mask.pkl\")\n with open(saved_mask_dir, 'wb') as f:\n pickle.dump(mask, f)\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy\nimport tensorflow as tf\n\nfrom neurst.layers.decoders.transformer_decoder import TransformerDecoder\n\n\ndef test_transformer_decoder():\n dmodel = 4\n batch_size = 2\n num_layers = 1\n num_self_attention_heads = 2\n hidden_size = dmodel\n filter_size = 16\n self_attention_dropout_rate = 0.1\n ffn_dropout_rate = 0.1\n layer_postprocess_dropout_rate = 0.1\n # max_len = 4\n # max_decoder_len = 3\n\n decoder = TransformerDecoder(\n num_layers=num_layers,\n num_attention_heads=num_self_attention_heads,\n hidden_size=hidden_size,\n filter_size=filter_size,\n attention_dropout_rate=self_attention_dropout_rate,\n ffn_dropout_rate=ffn_dropout_rate,\n layer_postprocess_dropout_rate=layer_postprocess_dropout_rate)\n encoder_outputs = tf.convert_to_tensor(\n [[[-0.37282175, 0.62301564, -2.0221813, -0.00875833],\n [0.31516594, -1.117763, -1.0697726, 0.80373234],\n [-0.717022, 0.3300997, -0.44306225, 1.550383],\n [-1.5516962, 0.6025011, 1.8262954, 0.42469704]],\n\n [[-0.98617625, 2.2856202, -1.3063533, 0.4174998],\n [1.5724765, 1.2201295, 1.1479746, 0.7810888],\n [0.8343642, -1.073388, 1.2718492, -0.7290778],\n [-1.4126722, 1.8000795, -2.118672, -0.1366007]]], dtype=tf.float32)\n encoder_inputs_padding = tf.convert_to_tensor(\n [[0, 0, 0, 0], [0, 0, 1., 1.]], dtype=tf.float32)\n decoder_inputs = tf.convert_to_tensor(\n [[[8.6675537e-01, 2.2135425e-01, 1.4054185e+00, -4.2268831e-01],\n [1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],\n [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]],\n\n [[-1.1870812e+00, -5.4499257e-01, -8.6622888e-01, -7.4098641e-01],\n [2.2233427e-01, 5.3582352e-01, 3.0567116e-01, 1.0201423e-01],\n [-1.8053315e+00, 7.2125041e-01, 1.0072237e+00, -2.0333264e+00]]], dtype=tf.float32)\n # test for training\n cache = decoder.create_decoding_internal_cache(\n encoder_outputs, encoder_inputs_padding, is_inference=False)\n _ = decoder(decoder_inputs, cache, is_training=False)\n for w in decoder.trainable_weights:\n if \"layer_0/self_attention_prepost_wrapper/self_attention/output_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.39332086, -0.3676856, -0.50203305, 0.6782059],\n [-0.41239128, -0.15406412, 0.3964849, -0.79016757],\n [0.6749844, -0.09548753, 0.16253561, -0.0560202],\n [-0.4699119, 0.82842, 0.35657936, -0.45770356]],\n dtype=tf.float32))\n elif \"layer_0/self_attention_prepost_wrapper/self_attention/qkv_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.03949255, 0.32946128, 0.38817757, 0.47047406, 0.07609951,\n 0.03131855, 0.15958023, 0.3292094, 0.42809182, 0.27969742,\n 0.39156157, -0.604576],\n [0.4869359, -0.590637, 0.3092571, 0.10321742, 0.45608515,\n 0.27015948, 0.2959339, 0.32079375, 0.480197, -0.35878542,\n 0.04467481, 0.467416],\n [-0.40064478, -0.05089319, -0.0999378, -0.6048573, 0.4379304,\n 0.3692366, 0.39103013, 0.24920046, -0.37060317, -0.03119427,\n 0.25101495, 
-0.21076846],\n [0.42842942, 0.48276085, -0.2498649, -0.0978691, -0.01024461,\n -0.04072392, -0.43499938, -0.09718102, 0.18174142, 0.07100755,\n -0.6075252, -0.3018506]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/output_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-0.31871676, 0.46451026, -0.32600254, -0.42110354],\n [0.45953768, -0.52176374, -0.47615638, -0.7818449],\n [0.7724063, -0.25975162, -0.49630436, 0.4681155],\n [0.7189149, 0.25591546, 0.2100411, -0.3439259]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/q_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.27346164, -0.12056953, 0.4617111, 0.3126462],\n [-0.65311253, 0.24505383, 0.56249744, -0.5582411],\n [-0.47464705, -0.60553044, 0.3019113, 0.33609575],\n [-0.24644238, -0.16026068, -0.0945828, -0.05111927]],\n dtype=tf.float32))\n elif \"layer_0/encdec_attention_prepost_wrapper/encdec_attention/kv_transform/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-0.4204824, -0.23150605, 0.12045383, -0.6538836, 0.29070246,\n -0.38376695, 0.65055054, -0.51375425],\n [0.67025226, 0.0928542, -0.56662744, 0.12781924, -0.6193744,\n -0.61801594, 0.07964879, 0.16530299],\n [-0.06940353, -0.08732289, 0.24984497, 0.18489975, 0.5354368,\n -0.07608587, -0.5801205, -0.17658263],\n [0.54784423, -0.39817223, -0.11673075, 0.14106786, -0.1637184,\n 0.00750518, -0.44365695, -0.38458544]],\n dtype=tf.float32))\n elif \"layer_0/ffn_prepost_wrapper/ffn/dense1/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[-2.9522404e-01, -1.1858380e-01, 1.3743329e-01, -3.3782017e-01,\n -3.8876867e-01, 4.8396683e-01, 1.5062505e-01, -3.7749952e-01,\n -2.9512924e-01, -1.6212821e-02, -1.8608570e-04, -4.1960135e-01,\n 5.3800035e-01, 2.7734953e-01, 5.5179596e-03, -3.4055352e-02],\n [2.1051055e-01, 3.6151302e-01, 3.1045640e-01, -1.1510965e-01,\n 4.6738219e-01, 1.2504590e-01, -1.9454169e-01, 4.1786206e-01,\n -3.7045652e-01, 3.3854598e-01, -5.0978750e-01, 5.2220762e-01,\n 1.6077441e-01, -3.9631999e-01, 2.1259248e-01, 2.3286474e-01],\n [-1.0005751e-01, -5.0858349e-01, 3.6911082e-01, -5.1783592e-02,\n 7.1038425e-02, -1.1148521e-01, -5.3392905e-01, 3.6009926e-01,\n 7.9382658e-02, 1.0371411e-01, -5.0254786e-01, 1.7596281e-01,\n -9.2926025e-03, -6.4194202e-04, -1.4125884e-02, 4.7321141e-01],\n [2.8647327e-01, 2.6127762e-01, 4.5843053e-01, 4.9775457e-01,\n 3.8056010e-01, -4.0995055e-01, 3.6980593e-01, 3.3520699e-02,\n -1.8056035e-03, 1.6578972e-02, 1.6026449e-01, -2.4952739e-01,\n -3.1434530e-01, -1.3158950e-01, 7.9998970e-03, 1.1293548e-01]],\n dtype=tf.float32))\n elif \"layer_0/ffn_prepost_wrapper/ffn/dense2/kernel\" in w.name:\n tf.compat.v1.assign(w, tf.convert_to_tensor(\n [[0.2794218, 0.29263318, 0.42604703, -0.24461824],\n [0.32469118, -0.2654639, 0.17872995, 0.06222689],\n [-0.07604656, -0.29360557, -0.462821, 0.3731665],\n [0.27989155, 0.53663385, -0.12042063, 0.34913152],\n [-0.50028926, 0.08958912, 0.50753117, -0.03860039],\n [0.12980306, -0.47548878, 0.5443562, -0.41777247],\n [0.16824102, -0.5271052, -0.18454444, 0.2987221],\n [0.22610295, -0.3761598, 0.4983195, 0.31664205],\n [-0.36606842, -0.3778124, 0.01393354, 0.23516071],\n [0.26510388, -0.47218412, 0.42749757, 0.22174352],\n [0.4139307, 0.09682184, -0.1447433, -0.07231569],\n [0.01711905, -0.18132755, 0.03224993, 0.2071482],\n [0.12195373, -0.52764714, 0.48840046, -0.21843264],\n [0.12467605, -0.45452338, 0.05892056, 
-0.2852741],\n [-0.5464495, -0.4856094, -0.29271287, 0.10828984],\n [0.37080926, 0.01543814, 0.10875225, -0.2678996]],\n dtype=tf.float32))\n\n assert numpy.sum((decoder(decoder_inputs, cache, is_training=False).numpy()\n - numpy.array([[[0.4727962, -0.6863654, 1.387909, -1.1743398],\n [1.4770155, -1.2802002, 0.18456227, -0.38137752],\n [0.6776164, -0.4934968, 1.1886327, -1.3727522]],\n [[-1.6973993, 0.26954588, 0.59817475, 0.82967865],\n [-1.6315649, -0.0030859, 0.7861572, 0.8484935],\n [-1.4942819, 0.42606276, 1.246516, -0.17829692]]])) ** 2) < 1e-9\n\n # for inference\n cache = decoder.create_decoding_internal_cache(\n encoder_outputs, encoder_inputs_padding, is_inference=True)\n decoder_inputs = tf.convert_to_tensor(\n [[1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],\n [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]], dtype=tf.float32)\n assert numpy.sum(\n (decoder(decoder_inputs, cache, is_training=False).numpy()\n - numpy.array([[1.4581295, -1.3640043, -0.1138487, 0.01972346],\n [-0.06228875, -1.0514979, 1.6223053, -0.5085185]])) ** 2) < 1e-9\n assert numpy.sum(\n (cache[\"decoding_states\"][\"layer_0\"][\"self_attention\"][\"keys\"].numpy()\n - numpy.array(numpy.reshape([[[-0.63596207, -0.49432975, -0.36614707, 0.03477353]],\n [[0.6539597, 0.4846998, 1.2206339, 0.67560077]]],\n [batch_size, 1, num_self_attention_heads,\n hidden_size // num_self_attention_heads]))) ** 2) < 1e-9\n assert numpy.sum(\n (cache[\"decoding_states\"][\"layer_0\"][\"self_attention\"][\"values\"].numpy()\n - numpy.array(numpy.reshape([[[0.6045396, 0.78576076, 0.3205938, -1.2158906]],\n [[0.14660448, -0.38737938, 1.2869109, 0.6795136]]],\n [batch_size, 1, num_self_attention_heads,\n hidden_size // num_self_attention_heads]))) ** 2) < 1e-9\n\n\nif __name__ == \"__main__\":\n test_transformer_decoder()\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom absl import logging\n\nfrom neurst.exps import register_exp\nfrom neurst.exps.sequence_generator import SequenceGenerator\nfrom neurst.utils import compat\nfrom neurst.utils.flags_core import Flag\n\n\n@register_exp([\"mask_predict\", \"mask_generation\"])\nclass MaskSequenceGenerator(SequenceGenerator):\n \"\"\" Entry for sequence generation. \"\"\"\n\n def __init__(self, args, **kwargs):\n \"\"\" Initializes a util class for sequence generation. \"\"\"\n self._loaded_mask = None\n if args[\"mask_pkl\"]:\n logging.info(f\"Loading mask from {args['mask_pkl']}\")\n with tf.io.gfile.GFile(args[\"mask_pkl\"], 'rb') as f:\n self._loaded_mask = pickle.load(f)\n super(MaskSequenceGenerator, self).__init__(args, **kwargs)\n\n @staticmethod\n def class_or_method_args():\n this_flags = super(MaskSequenceGenerator, MaskSequenceGenerator).class_or_method_args()\n this_flags.append(Flag(\"mask_pkl\", dtype=Flag.TYPE.STRING, default=None,\n help=\"The path to the mask pkl file.\"), )\n return this_flags\n\n @staticmethod\n def build_generation_model(task, model, search_layer, output_sequence_only=True):\n \"\"\" Build keras model for generation.\n\n Args:\n task: The task object.\n model: An instance of neurst.models.model.BaseModel\n search_layer: A sequence search object.\n output_sequence_only: Only generated sequences will output if True.\n\n Returns: the generation model.\n \"\"\"\n if search_layer is None:\n raise ValueError(\n \"The parameters for generation method must be provided: \"\n \"search_method, search_method.params, ...\")\n inps = task.create_inputs(compat.ModeKeys.INFER)\n formatted_inps = task.example_to_input(inps, compat.ModeKeys.INFER)\n search_layer.set_model(model)\n generation_ops = search_layer(formatted_inps)\n if output_sequence_only:\n generation_ops = generation_ops[0]\n keras_model = tf.keras.Model(inps, generation_ops)\n return keras_model\n\n def apply_mask(self, model, masks):\n tuples = []\n for (weight, mask) in list(zip(model.trainable_weights, masks)):\n masked_weight = weight * tf.cast(mask, weight.dtype.base_dtype)\n tuples.append((weight, masked_weight))\n\n K.batch_set_value(tuples)\n\n def _build_and_restore_model(self):\n \"\"\" Build a single model or ensemble model. \"\"\"\n model = super(MaskSequenceGenerator, self)._build_and_restore_model()\n if self._loaded_mask is not None:\n self.apply_mask(model, self._loaded_mask)\n return model\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.data import dataset_utils\nfrom neurst.data.data_pipelines.multilingual_text_data_pipeline import MultilingualTextDataPipeline\nfrom neurst.layers.metric_layers.token_metric_layers import BatchCountMetricLayer, SequenceTokenMetricLayer\nfrom neurst.metrics import build_metric\nfrom neurst.models import build_model\nfrom neurst.models.model_utils import deduce_text_length\nfrom neurst.tasks import register_task\nfrom neurst.tasks.task import Task\nfrom neurst.training.training_utils import maximum_lower_multiple, minimal_multiple\nfrom neurst.utils import compat\nfrom neurst.utils.configurable import deep_merge_dict\nfrom neurst.utils.flags_core import Flag\n\n_TRG_LANG_TAG_POSITIONS = [\"source\", \"target\", \"src\", \"trg\"]\n\n\n@register_task\nclass MultilingualTranslation(Task):\n \"\"\" Defines the translation task. \"\"\"\n\n def __init__(self, args):\n \"\"\" Initializes the task.\n\n Args:\n args: A dict of model configurations.\n \"\"\"\n super(MultilingualTranslation, self).__init__(args)\n self._multilingual_dp = MultilingualTextDataPipeline(\n vocab_path=args[\"vocab_path\"], spm_model=args[\"spm_model\"],\n languages=args[\"languages\"])\n self._with_src_lang_tag = args[\"with_src_lang_tag\"]\n self._trg_lang_tag_position = args[\"trg_lang_tag_position\"]\n assert self._trg_lang_tag_position in _TRG_LANG_TAG_POSITIONS\n\n @staticmethod\n def class_or_method_args():\n this_args = super(MultilingualTranslation, MultilingualTranslation).class_or_method_args()\n this_args.extend([\n # for creating multilingual pipeline\n Flag(\"vocab_path\", dtype=Flag.TYPE.STRING,\n help=\"The path to the vocabulary file, or a list of word tokens.\"),\n Flag(\"spm_model\", dtype=Flag.TYPE.STRING,\n help=\"The path to the sentence piece model.\"),\n Flag(\"languages\", dtype=Flag.TYPE.STRING,\n help=\"A list of languages. The corresponding language tags \"\n \"will automatically append to the vocabulary. 
\"),\n # for preprocessing data\n Flag(\"max_src_len\", dtype=Flag.TYPE.INTEGER, default=80,\n help=\"The maximum source length of training data.\"),\n Flag(\"max_trg_len\", dtype=Flag.TYPE.INTEGER, default=80,\n help=\"The maximum target length of training data.\"),\n Flag(\"truncate_src\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate source to max_src_len.\"),\n Flag(\"truncate_trg\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to truncate target to max_trg_len.\"),\n # for batching dataset\n Flag(\"batch_by_tokens\", dtype=Flag.TYPE.BOOLEAN, default=None,\n help=\"Whether to batch the data by word tokens.\"),\n Flag(\"with_src_lang_tag\", dtype=Flag.TYPE.STRING, default=False,\n help=\"Whether to append the source language tag at the beginning of the source sentence.\"),\n Flag(\"trg_lang_tag_position\", dtype=Flag.TYPE.STRING, default=\"trg\",\n choices=_TRG_LANG_TAG_POSITIONS,\n help=\"The position where the target language tag will be appended\"),\n ])\n return this_args\n\n def get_config(self):\n return {\n \"vocab_path\": self._args[\"vocab_path\"],\n \"spm_model\": self._args[\"spm_model\"],\n \"languages\": self._args[\"languages\"],\n \"with_src_lang_tag\": self._with_src_lang_tag,\n \"trg_lang_tag_position\": self._trg_lang_tag_position,\n }\n\n def inputs_signature(self, mode):\n \"\"\" Returns the input dtypes and signatures. \"\"\"\n dtypes = {\"feature\": tf.int64, \"src_lang\": tf.int64, \"trg_lang\": tf.int64}\n signatures = {\"feature\": tf.TensorShape([None, None]),\n \"src_lang\": tf.TensorShape([None, ]),\n \"trg_lang\": tf.TensorShape([None, ])}\n if mode == compat.ModeKeys.INFER:\n return dtypes, signatures\n dtypes[\"label\"] = tf.int64\n signatures[\"label\"] = tf.TensorShape([None, None])\n return dtypes, signatures\n\n def build_model(self, args, name=None):\n \"\"\" Builds and return a keras model. 
\"\"\"\n model = build_model(args, self._multilingual_dp.meta,\n self._multilingual_dp.meta, name=name)\n return model\n\n def example_to_input(self, batch_of_data: dict, mode) -> dict:\n \"\"\" Transform the data examples to model acceptable inputs.\n\n Args:\n batch_of_data: A data tensor with shape [batch, ...]\n mode: The running mode.\n\n Returns: The input data for model.\n \"\"\"\n src = batch_of_data[\"feature\"]\n if self._trg_lang_tag_position in [\"src\", \"source\"]:\n src = tf.concat([tf.expand_dims(batch_of_data[\"trg_lang\"], axis=1), src], axis=1)\n if self._with_src_lang_tag:\n src = tf.concat([tf.expand_dims(batch_of_data[\"src_lang\"], axis=1), src], axis=1)\n\n input_dict = {\"src\": src,\n \"src_length\": deduce_text_length(src, self._multilingual_dp.meta[\"pad_id\"],\n self._multilingual_dp.meta[\"padding_mode\"])}\n if self._trg_lang_tag_position in [\"trg\", \"target\"]:\n target_bos = batch_of_data[\"trg_lang\"]\n else:\n target_bos = tf.tile([tf.convert_to_tensor(\n self._multilingual_dp.meta[\"bos_id\"], dtype=tf.int64)], [tf.shape(src)[0]])\n if mode == compat.ModeKeys.INFER:\n input_dict[\"trg_input\"] = target_bos\n else:\n input_dict[\"trg\"] = batch_of_data[\"label\"]\n input_dict[\"trg_length\"] = deduce_text_length(batch_of_data[\"label\"],\n self._multilingual_dp.meta[\"pad_id\"],\n self._multilingual_dp.meta[\"padding_mode\"])\n input_dict[\"trg_input\"] = tf.concat([tf.expand_dims(target_bos, axis=1),\n batch_of_data[\"label\"][:, :-1]], axis=1)\n return input_dict\n\n def get_data_postprocess_fn(self, data_status, **kwargs) -> callable:\n if data_status == compat.DataStatus.PROJECTED:\n return self._multilingual_dp.decode\n elif data_status == compat.DataStatus.PROCESSED:\n return self._multilingual_dp.postprocess\n return lambda x: x\n\n def get_data_preprocess_fn(self, mode, data_status=compat.DataStatus.RAW, args=None) -> callable:\n \"\"\" Preprocess data sample according to this task.\n\n Args:\n args: A dict containing dataset arguments.\n mode: A ModeKeys indicating the running mode.\n data_status: The status of the data sample.\n\n Returns: A callable function to collate (process) a data sample.\n \"\"\"\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n truncate_src = args.get(\"truncate_src\", None)\n truncate_trg = args.get(\"truncate_trg\", None)\n max_src_len = args.get(\"max_src_len\", None)\n max_trg_len = args.get(\"max_trg_len\", None)\n\n def _process_and_truncate(text, trunc, max_len):\n if data_status != compat.DataStatus.PROJECTED:\n text = self._multilingual_dp.encode(\n text, is_processed=(data_status == compat.DataStatus.PROCESSED))\n if mode == compat.ModeKeys.TRAIN and trunc and max_len:\n if compat.is_tf_tensor(text):\n text = tf.cond(\n tf.less_equal(tf.size(text), max_len), lambda: text,\n lambda: tf.concat([text[:(max_len - 1)], text[-1:]], axis=0))\n elif len(text) > max_len:\n text = text[:(max_len - 1)] + text[-1:]\n return text\n\n def _process_lang(lang):\n if not compat.is_tf_tensor(lang) and isinstance(lang, str):\n return self._multilingual_dp.meta[\"lang2id\"][lang]\n assert isinstance(lang, int)\n return lang\n\n if mode == compat.ModeKeys.INFER:\n return lambda data: {\n \"feature\": _process_and_truncate(data[\"feature\"], truncate_src, max_src_len),\n \"src_lang\": _process_lang(data[\"src_lang\"]),\n \"trg_lang\": _process_lang(data[\"trg_lang\"]), }\n return lambda data: {\n \"feature\": _process_and_truncate(data[\"feature\"], truncate_src, 
max_src_len),\n \"label\": _process_and_truncate(data[\"label\"], truncate_trg, max_trg_len),\n \"src_lang\": _process_lang(data[\"src_lang\"]),\n \"trg_lang\": _process_lang(data[\"trg_lang\"]),\n }\n\n def create_and_batch_tfds(self, ds, mode,\n args=None, num_replicas_in_sync=1) -> tf.data.Dataset:\n \"\"\" Creates a dataset according to the `mode`.\n\n Args:\n args: A dict containing dataset arguments.\n ds: A neurst.data.datasets.Dataset object.\n mode: A ModeKeys indicating the running mode.\n num_replicas_in_sync: The number of GPUs or other workers. We will generate global\n batches, and each global batch is equally divisible by number of replicas.\n\n Returns:\n A tf.data.Dataset.\n \"\"\"\n if args is None:\n args = self._args\n else:\n args = deep_merge_dict(self._args, args, local_overwrite=False)\n eos = tf.constant(self._multilingual_dp.meta[\"eos_id\"], dtype=tf.int64)\n int_zero = tf.zeros([], dtype=tf.int64)\n\n dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds.status, args),\n map_output_dtypes=self.inputs_signature(mode)[0],\n auto_shard=(mode == compat.ModeKeys.TRAIN),\n shuffle=(mode == compat.ModeKeys.TRAIN))\n\n if mode == compat.ModeKeys.INFER:\n logging.info(\"Creating test dataset.\")\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes={\"feature\": [None], \"src_lang\": [], \"trg_lang\": []},\n padding_values={\"feature\": eos, \"src_lang\": int_zero, \"trg_lang\": int_zero},\n drop_remainder=False)\n elif mode == compat.ModeKeys.EVAL:\n logging.info(\"Creating evaluation dataset.\")\n return dataset.cache().padded_batch(\n dataset_utils.adjust_batch_size(args[\"batch_size\"],\n num_replicas_in_sync=num_replicas_in_sync),\n padded_shapes={\"feature\": [None], \"label\": [None], \"src_lang\": [], \"trg_lang\": []},\n padding_values={\"feature\": eos, \"label\": eos,\n \"src_lang\": int_zero, \"trg_lang\": int_zero},\n drop_remainder=False)\n else:\n logging.info(\"Creating training dataset.\")\n dataset = dataset_utils.clean_dataset_by_length(\n dataset, {\"feature\": args[\"max_src_len\"], \"label\": args[\"max_trg_len\"]})\n if args[\"cache_dataset\"]:\n dataset = dataset.cache()\n if args[\"shuffle_buffer\"]:\n dataset = dataset.shuffle(buffer_size=args[\"shuffle_buffer\"])\n padding_values = {\"feature\": eos, \"label\": eos,\n \"src_lang\": int_zero, \"trg_lang\": int_zero}\n if args[\"max_src_len\"] is None:\n raise RuntimeError(\"Must provide `max_src_len` for training.\")\n if args[\"max_trg_len\"] is None:\n raise RuntimeError(\"Must provide `max_trg_len` for training.\")\n\n num_extra_srctokens = 0\n if self._with_src_lang_tag:\n num_extra_srctokens += 1\n if self._trg_lang_tag_position in [\"src\", \"source\"]:\n num_extra_srctokens += 1\n\n max_src_len = minimal_multiple(args[\"max_src_len\"] + num_extra_srctokens, 8)\n max_trg_len = minimal_multiple(args[\"max_trg_len\"], 8)\n batch_size = dataset_utils.adjust_batch_size(args[\"batch_size\"], args[\"batch_size_per_gpu\"],\n num_replicas_in_sync=num_replicas_in_sync,\n verbose=False)\n src_bucket_boundaries = [8 * i for i in range(1, max_src_len // 8 + 1)]\n if src_bucket_boundaries[-1] < max_src_len:\n src_bucket_boundaries.append(minimal_multiple(src_bucket_boundaries[-1] + 1, 8))\n trg_bucket_boundaries = [8 * i for i in range(1, max_trg_len // 8 + 1)]\n if trg_bucket_boundaries[-1] < max_trg_len:\n trg_bucket_boundaries.append(minimal_multiple(trg_bucket_boundaries[-1] + 1, 8))\n 
src_bucket_boundaries, trg_bucket_boundaries = dataset_utils.associated_bucket_boundaries(\n src_bucket_boundaries, trg_bucket_boundaries)\n src_bucket_boundaries = [x - num_extra_srctokens for x in src_bucket_boundaries]\n bucket_boundaries = {\n \"feature\": src_bucket_boundaries,\n \"label\": trg_bucket_boundaries\n }\n bucket_batch_sizes = dataset_utils.adjust_batch_size(\n batch_size,\n bucket_boundaries=bucket_boundaries if args[\"batch_by_tokens\"] else None,\n boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),\n num_replicas_in_sync=num_replicas_in_sync)\n if isinstance(bucket_batch_sizes, list):\n bucket_batch_sizes = [\n int(maximum_lower_multiple(x // num_replicas_in_sync, 8) * num_replicas_in_sync)\n for x in bucket_batch_sizes]\n else:\n bucket_batch_sizes = int(maximum_lower_multiple(\n bucket_batch_sizes // num_replicas_in_sync, 8) * num_replicas_in_sync)\n return dataset_utils.batch_examples_by_token(\n dataset,\n bucket_boundaries=bucket_boundaries,\n bucket_batch_sizes=bucket_batch_sizes,\n padding_values=padding_values,\n example_length_func=lambda x: {\"feature\": tf.size(x[\"feature\"]),\n \"label\": tf.size(x[\"label\"])},\n extra_padded_shapes={\"src_lang\": [], \"trg_lang\": []}\n )\n\n def build_metric_layer(self):\n return [SequenceTokenMetricLayer(\"src\"), SequenceTokenMetricLayer(\"trg\"),\n BatchCountMetricLayer(\"src\")]\n\n def get_eval_metric(self, args, name=\"metric\", ds=None):\n \"\"\" Returns a neurst.metrics.metric.Metric object for evaluation.\"\"\"\n if ds is None or not hasattr(ds, \"trg_lang\") or ds.trg_lang is None:\n logging.info(\"WARNING: The dataset must have `trg_lang` property, \"\n \"otherwise no metric will be created.\")\n return None\n return build_metric(args[name + \".class\"], language=ds.trg_lang,\n **args[name + \".params\"])\n",
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport time\nimport traceback\n\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.exps import BaseExperiment, register_exp\nfrom neurst.tasks import build_task\nfrom neurst.training import Validator, build_validator\nfrom neurst.utils import compat\nfrom neurst.utils.configurable import ModelConfigs\nfrom neurst.utils.flags_core import Flag, ModuleFlag\n\n\n@register_exp\nclass Validation(BaseExperiment):\n \"\"\" Validation for all tasks during training. \"\"\"\n\n def __init__(self, args, **kwargs):\n \"\"\" Initializes a util class for vaidation. \"\"\"\n super(Validation, self).__init__(**kwargs)\n self._tb_log_dir = args[\"tb_log_dir\"]\n self._waiting_interval = args[\"waiting_interval\"]\n self._maximum_waiting_time = args[\"maximum_waiting_time\"]\n self._validator = build_validator(args)\n\n @staticmethod\n def class_or_method_args():\n return [\n Flag(\"tb_log_dir\", dtype=Flag.TYPE.STRING, default=None,\n help=\"The path to store tensorboard summary, or `model_dir`/validation by default.\"),\n Flag(\"waiting_interval\", dtype=Flag.TYPE.INTEGER, default=120,\n help=\"The waiting interval between two evaluation steps.\"),\n Flag(\"maximum_waiting_time\", dtype=Flag.TYPE.INTEGER, default=3600,\n help=\"The maximum waiting time(in seconds).\"),\n ModuleFlag(Validator.REGISTRY_NAME, help=\"The validation process during training.\"),\n ]\n\n def run(self):\n \"\"\" Repeats to call validator's validate function if new checkponts are observed.\n\n Step 1: Build model.\n Step 2: Fetch training status.\n while True:\n Step 3: Restore checkpoints.\n Step 4: Validate.\n \"\"\"\n if self.task is None or self.model is None:\n model_cfg_waiting_rounds = self._maximum_waiting_time // self._waiting_interval\n for i in range(model_cfg_waiting_rounds):\n try:\n args = ModelConfigs.load(self._model_dir)\n break\n except FileNotFoundError:\n logging.info(f\"Fail to load model configs from directory: {self.model_dir}. 
\"\n f\"Wait for another {self._waiting_interval}s, \"\n f\"patience={model_cfg_waiting_rounds - 1 - i}.\")\n time.sleep(self._waiting_interval)\n self._task = build_task(args)\n self._model = self.task.build_model(args)\n # initialize the checkpoint manager\n saver = compat.get_saver_or_default(self.model, self.model_dir)\n # enable tensorboard\n if self._tb_log_dir is None:\n self._tb_log_dir = os.path.join(self.model_dir, \"validation_{}\".format(int(time.time())))\n file_writer = tf.summary.create_file_writer(self._tb_log_dir)\n file_writer.set_as_default()\n # create training\n self._validator.build(self.strategy, self.task, self.model)\n last_triggered_step = None\n accumulated_waiting_time = 0\n this_waiting_interval = next_waiting_interval = self._waiting_interval\n while True:\n bad_cnt = 0\n while bad_cnt < 5:\n try:\n ckpt_state = tf.train.get_checkpoint_state(self.model_dir)\n break\n except ValueError:\n bad_cnt += 1\n time.sleep(5)\n logging.info(traceback.format_exc())\n if bad_cnt >= 5:\n ckpt_state = tf.train.get_checkpoint_state(self.model_dir)\n\n ckpts_to_be_restore = None\n if ckpt_state is None:\n logging.info(f\"No checkpoint in directory: {self.model_dir}. Please wait.\")\n else:\n all_ckpts = [(t, x) for t, x in zip(ckpt_state.all_model_checkpoint_timestamps,\n ckpt_state.all_model_checkpoint_paths)]\n global_steps_to_be_restore = []\n ckpts_to_be_restore = []\n for ckpt in all_ckpts[::-1]:\n step = compat.hack_global_step(ckpt[1])\n if last_triggered_step is None or step > last_triggered_step:\n ckpts_to_be_restore.insert(0, ckpt)\n global_steps_to_be_restore.insert(0, step)\n if len(ckpts_to_be_restore) > 0:\n accumulated_waiting_time = 0\n _start_time = time.time()\n for step, (timestamp, ckpt) in zip(global_steps_to_be_restore, ckpts_to_be_restore):\n try:\n stat = saver.restore(ckpt)\n except tf.errors.NotFoundError:\n logging.info(f\"Not found checkpoint {ckpt}. Skip...\")\n if not stat:\n logging.info(f\"Fail to restore checkpoint from {ckpt}. Skip...\")\n continue\n logging.info(f\"Checkpoint with global_step={step} triggered on {timestamp}\")\n self._validator.validate(step)\n last_triggered_step = step\n this_waiting_interval = max(this_waiting_interval - int(time.time() - _start_time), 10)\n tf.summary.flush(file_writer)\n if ckpts_to_be_restore is None:\n pass\n elif len(ckpts_to_be_restore) > 1:\n this_waiting_interval = int(this_waiting_interval * 1.\n * (len(ckpts_to_be_restore) // 2) / len(ckpts_to_be_restore))\n next_waiting_interval = this_waiting_interval\n elif len(ckpts_to_be_restore) == 0:\n next_waiting_interval = min(int(this_waiting_interval * 4. / 3.), self._waiting_interval)\n this_waiting_interval = this_waiting_interval // 2\n accumulated_waiting_time += this_waiting_interval\n if accumulated_waiting_time > self._maximum_waiting_time:\n logging.info(f\"Waited for maximum patience: {self._maximum_waiting_time}s\")\n break\n time.sleep(this_waiting_interval)\n this_waiting_interval = next_waiting_interval\n"
] |
[
[
"tensorflow.io.gfile.GFile",
"tensorflow.math.not_equal",
"tensorflow.keras.Model"
],
[
"tensorflow.convert_to_tensor",
"numpy.array",
"numpy.reshape"
],
[
"tensorflow.io.gfile.GFile",
"tensorflow.cast",
"tensorflow.keras.backend.batch_set_value",
"tensorflow.keras.Model"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.nest.flatten",
"tensorflow.size"
],
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.flush",
"tensorflow.summary.create_file_writer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrotke/pyStock
|
[
"76aad7c8bdd112d3a53ed013cbe9ff660a90d5bf"
] |
[
"lib/moneyflowindex.py"
] |
[
"# Add import from parent directory possible\nimport sys\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nfrom lib.DataOperations import *\nfrom lib.ReportSignals import *\nfrom lib.Stock import *\nfrom lib.indicator import indicator\n\n# Creates MoneyFlowIndex object\n\n\ndef CreateMoneyFlowIndex(high, low, close, volume, info, n=14):\n return MoneyFlowIndex(high, low, close, volume, info, n)\n\n\n# MoneyFlowIndex object which creates MoneyFlowIndex data\nclass MoneyFlowIndex(indicator):\n\n def __init__(self, high, low, close, volume, info, n=14):\n indicator.__init__(self, 'MFI%u' % n, 'momentum')\n self.n = n\n self.info = info\n self.typicalPrice = (high + low + close) / 3\n self.moneyFlow, self.posFlow, self.negFlow, self.mfi = self.InitMoneyFlow(\n self.typicalPrice, volume, n)\n # money on the market plot\n self.moneyMarket = self.moneyFlow.cumsum()\n\n # Signals\n fromBottom, fromTop = FindIntersections(self.mfi, 20)\n self.buy = fromBottom\n fromBottom, fromTop = FindIntersections(self.mfi, 80)\n self.sell = fromTop\n # TrenToFall / TrendToRise\n fromBottom, fromTop = FindIntersections(self.mfi, 10)\n self.buyStrong = fromBottom\n fromBottom, fromTop = FindIntersections(self.mfi, 90)\n self.sellStrong = fromTop\n\n # returns AverageTrueRange\n def GetMoneyFlow(self):\n return self.MoneyFlow\n\n # Set MoneyFlowIndex indicator\n def InitMoneyFlow(self, tp, volume, n):\n moneyFlow = tp * volume\n posFlow = pd.Series()\n negFlow = pd.Series()\n\n for i in range(1, len(moneyFlow)):\n if (moneyFlow[i] >= 0):\n posFlow = posFlow.append(\n pd.Series(moneyFlow.values[i], index=[moneyFlow.index[i]]))\n negFlow = negFlow.append(\n pd.Series(0, index=[moneyFlow.index[i]]))\n else:\n posFlow = posFlow.append(\n pd.Series(0, index=[moneyFlow.index[i]]))\n negFlow = negFlow.append(\n pd.Series(abs(moneyFlow.values[i]), index=[moneyFlow.index[i]]))\n\n posFlowAvg = CreateMovingAverage(posFlow, n)\n negFlowAvg = CreateMovingAverage(negFlow, n)\n moneyRatio = posFlowAvg / negFlowAvg\n moneyFlowIndex = (100 * posFlowAvg) / (posFlowAvg + negFlowAvg)\n return moneyFlow, posFlow, negFlow, moneyFlowIndex\n\n # Export indicator signals to report\n def ExportSignals(self, reportSignals):\n reportSignals.AddDataframeSignals(self.buy, 'MFI', 'buy')\n reportSignals.AddDataframeSignals(self.sell, 'MFI', 'sell')\n reportSignals.AddDataframeSignals(self.buyStrong, 'MFI', 'buyStrong')\n reportSignals.AddDataframeSignals(self.sellStrong, 'MFI', 'sellStrong')\n\n # retunrs -100...100 value\n def GetUnifiedValue(self):\n return (self.mfi[-1] - 50)*2\n\n # Plot method\n def PlotPosNegFlow(self):\n plt.bar(self.negFlow.index, self.negFlow, color='red', label='')\n plt.bar(self.posFlow.index, self.posFlow, color='green', label='')\n # MoneyFlowIndex\n# plt.plot(self.posFlow.index, self.posFlow, label='PosFlow' + str(self.n), linewidth=1.0, color = 'green')\n# plt.plot(self.negFlow.index, self.negFlow, label='NegFlow' + str(self.n), linewidth=1.0, color = 'red')\n\n # Plot method\n\n def Plot(self):\n # MoneyFlowIndex\n plt.plot(self.mfi.index, self.mfi, label='MFI'\n + str(self.n), linewidth=1.0, color='#000000')\n x_axis = self.mfi.index.get_level_values(0)\n\n # OverBought\n overBought = CreateHorizontalLine(self.mfi.index, 80, 80, True)\n plt.plot(overBought.index, overBought, '--',\n label='Overbought', linewidth=1.0, color='#940006')\n# plt.fill_between(x_axis, self.mfi, overBought['value'],\n# where=self.mfi>overBought.values,color='#ffb3b3')\n # OverBought - Gene Quong and Avrum Soudack\n 
overBought = CreateHorizontalLine(self.mfi.index, 90, 90)\n plt.plot(overBought.index, overBought, '--',\n linewidth=0.6, color='#940006')\n\n # OverSold\n overSold = CreateHorizontalLine(self.mfi.index, 20, 20, True)\n plt.plot(overSold.index, overSold, '--',\n label='Oversold', linewidth=1.0, color='#169400')\n# plt.fill_between(x_axis, self.mfi, overSold['value'],\n# where=self.mfi<overSold.values,color='#b3ffb3')\n # OverSold - Gene Quong and Avrum Soudack\n overSold = CreateHorizontalLine(self.mfi.index, 10, 10)\n plt.plot(overSold.index, overSold, '--',\n linewidth=0.6, color='#169400')\n\n# # Signals plottting\n if (self.buy is not None and self.buy.size):\n plt.plot(self.buy.index, self.buy, 'o', color='#000000', ms=8)\n plt.plot(self.buy.index, self.buy, 'o',\n label='Buy', color='#00FF00')\n if (self.buyStrong is not None and self.buyStrong.size):\n plt.plot(self.buyStrong.index, self.buyStrong,\n 's', color='#000000', ms=8)\n plt.plot(self.buyStrong.index, self.buyStrong,\n 's', label='BuyStrong', color='#00FF00')\n if (self.sell is not None and self.sell.size):\n plt.plot(self.sell.index, self.sell, 'o', color='#000000', ms=8)\n plt.plot(self.sell.index, self.sell, 'o',\n label='Sell', color='#FF0000')\n if (self.sellStrong is not None and self.sellStrong.size):\n plt.plot(self.sellStrong.index, self.sellStrong,\n 's', color='#000000', ms=8)\n plt.plot(self.sellStrong.index, self.sellStrong,\n 's', label='SellStrong', color='#FF0000')\n\n # Limits of plot\n plt.ylim(top=100, bottom=0)\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"pandas.Series",
"matplotlib.pyplot.bar"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
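The MoneyFlowIndex row above builds its positive/negative money-flow series with a per-element loop and `pd.Series.append`, which newer pandas releases have removed. A minimal vectorized sketch of the same MFI ratio, assuming the conventional rule of classifying each bar by the change in typical price (the loop above keys off the sign of the raw flow instead) and a plain rolling sum in place of the repo's `CreateMovingAverage` helper; the ratio is unchanged whether the window uses sums or means:

```python
import pandas as pd

def money_flow_index(high: pd.Series, low: pd.Series, close: pd.Series,
                     volume: pd.Series, n: int = 14) -> pd.Series:
    """MFI = 100 * posFlow / (posFlow + negFlow), windowed over n bars."""
    tp = (high + low + close) / 3                  # typical price
    raw_flow = tp * volume                         # money flow per bar
    delta = tp.diff()                              # direction of the typical-price move
    pos_flow = raw_flow.where(delta > 0, 0.0)      # up bars feed the positive flow
    neg_flow = raw_flow.where(delta < 0, 0.0)      # down bars feed the negative flow
    pos_sum = pos_flow.rolling(n).sum()
    neg_sum = neg_flow.rolling(n).sum()
    return 100 * pos_sum / (pos_sum + neg_sum)     # same ratio as with window averages
```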
busySZl/pyfiberamp
|
[
"e6ddb34413e145cd662f7f0f23290bd872871978"
] |
[
"pyfiberamp/spectroscopies/spectroscopy.py"
] |
[
"from pyfiberamp.helper_funcs import *\n\nfrom scipy.interpolate import UnivariateSpline\nimport matplotlib.pyplot as plt\n\n\nclass Spectroscopy:\n @classmethod\n def from_files(cls, absorption_cross_section_file, emission_cross_section_file, upper_state_lifetime):\n absorption_spectrum = load_spectrum(absorption_cross_section_file)\n gain_spectrum = load_spectrum(emission_cross_section_file)\n return cls(absorption_spectrum, gain_spectrum, upper_state_lifetime)\n\n def __init__(self, absorption_cross_sections, emission_cross_sections, upper_state_lifetime):\n self.absorption_cs_spectrum = absorption_cross_sections\n self.emission_cs_spectrum = emission_cross_sections\n self.absorption_cs_interp = self._make_cross_section_interpolate(absorption_cross_sections)\n self.gain_cs_interp = self._make_cross_section_interpolate(emission_cross_sections)\n self.upper_state_lifetime = upper_state_lifetime\n\n @staticmethod\n def _make_cross_section_interpolate(spectrum):\n \"\"\"Creates a cubic spline interpolate from the imported cross section data. Cross section is assumed to be\n zero outside the imported data range.\"\"\"\n frequency = wl_to_freq(spectrum[::-1, 0])\n cross_section = spectrum[::-1, 1]\n spline = UnivariateSpline(frequency, cross_section, s=CROSS_SECTION_SMOOTHING_FACTOR, ext='zeros')\n\n def interp(freq):\n cross_sec = spline(freq)\n cross_sec[cross_sec < 0] = 0\n return cross_sec\n\n return interp\n\n def plot_gain_and_absorption_spectrum(self):\n \"\"\"Convenience plotting function to draw the imported cross section data and the calculated interpolates to\n check that they match.\"\"\"\n fig, ax = plt.subplots()\n gain = self.emission_cs_spectrum\n absorption = self.absorption_cs_spectrum\n gain_wls = np.linspace(gain[0, 0], gain[-1, 0], SPECTRUM_PLOT_NPOINTS)\n gain_vs = wl_to_freq(gain_wls)\n absorption_wls = np.linspace(absorption[0, 0], absorption[-1, 0], SPECTRUM_PLOT_NPOINTS)\n absorption_vs = wl_to_freq(absorption_wls)\n ax.plot(gain[:, 0] * 1e9, gain[:, 1], label='Gain')\n ax.plot(absorption[:, 0] * 1e9, absorption[:, 1], label='Absorption')\n ax.plot(absorption_wls * 1e9, self.absorption_cs_interp(absorption_vs), label='Absorption spline')\n ax.plot(gain_wls * 1e9, self.gain_cs_interp(gain_vs), label='Gain spline')\n ax.legend()\n ax.set_xlabel('Wavelength (nm)', fontsize=18)\n ax.set_ylabel('Gain/Absorption cross sections', fontsize=18)\n plt.show()\n\n\nYbGermanoSilicate= Spectroscopy.from_files(YB_ABSORPTION_CS_FILE, YB_EMISSION_CS_FILE, YB_UPPER_STATE_LIFETIME)\n"
] |
[
[
"scipy.interpolate.UnivariateSpline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
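`Spectroscopy._make_cross_section_interpolate` in the pyfiberamp row fits a smoothed `UnivariateSpline` over frequency with `ext='zeros'` and clips negative overshoot to zero. A self-contained sketch of that pattern on synthetic data; the wavelength grid, Gaussian cross-section shape, and smoothing factor are illustrative placeholders, not real Yb spectroscopy values:

```python
import numpy as np
from scipy.interpolate import UnivariateSpline

C = 299_792_458.0                                      # speed of light, m/s

# Synthetic absorption cross section vs wavelength (illustrative values only).
wl = np.linspace(850e-9, 1100e-9, 200)                 # m
cs = 2.5e-24 * np.exp(-((wl - 975e-9) / 10e-9) ** 2)   # m^2

# Convert to frequency and reverse so the x-axis is increasing, as the spline requires.
freq = C / wl[::-1]
cs_rev = cs[::-1]

# ext='zeros' returns 0 outside the fitted range; s plays the role of the library's
# CROSS_SECTION_SMOOTHING_FACTOR (s=0 interpolates the points exactly).
spline = UnivariateSpline(freq, cs_rev, s=0, ext='zeros')

def cross_section_interp(f):
    out = np.asarray(spline(f), dtype=float)
    out[out < 0] = 0                                   # clip spline overshoot below zero
    return out

print(cross_section_interp(np.array([C / 976e-9, C / 700e-9])))  # second value is 0 (out of range)
```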
aws-samples/amazon-sagemaker-local-mode
|
[
"f470d7b543f7895094816c3f58b9981e044764d8",
"f470d7b543f7895094816c3f58b9981e044764d8",
"f470d7b543f7895094816c3f58b9981e044764d8"
] |
[
"scikit_learn_script_mode_local_serving_no_model_artifact/code/inference.py",
"lightgbm_bring_your_own_container_local_training_and_serving/lightgbm_bring_your_own_container_local_training_and_serving.py",
"tensorflow_script_mode_california_housing_local_training_and_batch_transform/tensorflow_script_mode_california_housing_local_training_and_batch_transform.py"
] |
[
"import logging\nimport sys\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n\n# Perform prediction on the deserialized object, with the loaded model\ndef predict_fn(input_object, model):\n logger.info(\"predict_fn\")\n logger.info(f\"input_object: {input_object}\")\n\n response = np.average(input_object)\n logger.info(f\"returning response: {response}\")\n\n return response\n\n# Dummy model_fn function\ndef model_fn(model_dir):\n dummy_model = {}\n return dummy_model",
"# This is a sample Python program that trains a simple LightGBM Regression model, and then performs inference.\n# This implementation will work on your local computer.\n#\n# Prerequisites:\n# 1. Install required Python packages:\n# pip install boto3 sagemaker pandas scikit-learn\n# pip install 'sagemaker[local]'\n# 2. Docker Desktop has to be installed on your computer, and running.\n# 3. Open terminal and run the following commands:\n# docker build -t sagemaker-lightgbm-regression-local container/.\n########################################################################################################################\n\nimport pandas as pd\nfrom sagemaker.estimator import Estimator\nfrom sagemaker.local import LocalSession\nfrom sagemaker.predictor import csv_serializer\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\n\nsagemaker_session = LocalSession()\nsagemaker_session.config = {'local': {'local_code': True}}\n\n# For local training a dummy role will be sufficient\nrole = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'\n\ndata = load_boston()\n\nX_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.25, random_state=45)\nX_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=45)\n\ntrainX = pd.DataFrame(X_train, columns=data.feature_names)\ntrainX['target'] = y_train\n\nvalX = pd.DataFrame(X_test, columns=data.feature_names)\nvalX['target'] = y_test\n\ntestX = pd.DataFrame(X_test, columns=data.feature_names)\n\nlocal_train = './data/train/boston_train.csv'\nlocal_validation = './data/validation/boston_validation.csv'\nlocal_test = './data/test/boston_test.csv'\n\ntrainX.to_csv(local_train, header=None, index=False)\nvalX.to_csv(local_validation, header=None, index=False)\ntestX.to_csv(local_test, header=None, index=False)\n\nimage = 'sagemaker-lightgbm-regression-local'\n\nlocal_lightgbm = Estimator(\n image,\n role,\n instance_count=1,\n instance_type=\"local\",\n hyperparameters={'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0})\n\ntrain_location = 'file://'+local_train\nvalidation_location = 'file://'+local_validation\nlocal_lightgbm.fit({'train':train_location, 'validation': validation_location}, logs=True)\n\npredictor = local_lightgbm.deploy(1, 'local', serializer=csv_serializer)\n\nwith open(local_test, 'r') as f:\n payload = f.read().strip()\n\npredicted = predictor.predict(payload).decode('utf-8')\nprint(predicted)\n\npredictor.delete_endpoint(predictor.endpoint)\n",
"# This is a sample Python program that trains a simple TensorFlow California Housing model and run Batch Transform job.\n# This implementation will work on your *local computer* or in the *AWS Cloud*.\n# To run training and inference *locally* set: `config = get_config(LOCAL_MODE)`\n# To run training and inference on the *cloud* set: `config = get_config(CLOUD_MODE)` and set a valid IAM role value in get_config()\n#\n# Prerequisites:\n# 1. Install required Python packages:\n# `pip install -r requirements.txt`\n# 2. Docker Desktop installed and running on your computer:\n# `docker ps`\n# 3. You should have AWS credentials configured on your local machine\n# in order to be able to pull the docker image from ECR.\n###############################################################################################\n\nimport os\n\nimport pandas as pd\nimport sklearn.model_selection\nfrom sagemaker.tensorflow import TensorFlow\nfrom sklearn.datasets import *\nfrom sklearn.preprocessing import StandardScaler\n\nDUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'\n\n\ndef download_training_and_eval_data():\n if os.path.isfile('./data/train/x_train.csv') and \\\n os.path.isfile('./data/test/x_test.csv') and \\\n os.path.isfile('./data/train/y_train.csv') and \\\n os.path.isfile('./data/test/y_test.csv'):\n print('Training and evaluation datasets exist. Skipping Download')\n else:\n print('Downloading training and evaluation dataset')\n data_dir = os.path.join(os.getcwd(), 'data')\n os.makedirs(data_dir, exist_ok=True)\n\n train_dir = os.path.join(os.getcwd(), 'data/train')\n os.makedirs(train_dir, exist_ok=True)\n\n test_dir = os.path.join(os.getcwd(), 'data/test')\n os.makedirs(test_dir, exist_ok=True)\n\n input_dir = os.path.join(os.getcwd(), 'data/input')\n os.makedirs(input_dir, exist_ok=True)\n\n output_dir = os.path.join(os.getcwd(), 'data/output')\n os.makedirs(output_dir, exist_ok=True)\n\n data_set = fetch_california_housing()\n\n X = pd.DataFrame(data_set.data, columns=data_set.feature_names)\n Y = pd.DataFrame(data_set.target)\n\n # We partition the dataset into 2/3 training and 1/3 test set.\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.33)\n\n scaler = StandardScaler()\n scaler.fit(x_train)\n x_train = scaler.transform(x_train)\n x_test = scaler.transform(x_test)\n\n pd.DataFrame(x_train).to_csv(os.path.join(train_dir, 'x_train.csv'), header=None, index=False)\n pd.DataFrame(x_test).to_csv(os.path.join(test_dir, 'x_test.csv'),header=None, index=False)\n pd.DataFrame(x_test).to_csv(os.path.join(input_dir, 'x_test.csv'),header=None, index=False)\n pd.DataFrame(y_train).to_csv(os.path.join(train_dir, 'y_train.csv'), header=None, index=False)\n pd.DataFrame(y_test).to_csv(os.path.join(test_dir, 'y_test.csv'), header=None, index=False)\n\n print('Downloading completed')\n\n\ndef main():\n download_training_and_eval_data()\n\n print('Starting model training.')\n print(\n 'Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')\n california_housing_estimator = TensorFlow(entry_point='california_housing_tf2.py',\n source_dir='code',\n role=DUMMY_IAM_ROLE,\n instance_count=1,\n instance_type='local',\n framework_version='2.4.1',\n py_version='py37')\n\n inputs = {'train': 'file://./data/train', 'test': 'file://./data/test'}\n california_housing_estimator.fit(inputs)\n print('Completed model training')\n\n print('Running Batch 
Transform in local mode')\n tensorflow_serving_transformer = california_housing_estimator.transformer(\n instance_count=1,\n instance_type='local',\n output_path='file:./data/output',\n )\n\n tensorflow_serving_transformer.transform('file://./data/input',\n split_type='Line',\n content_type='text/csv')\n\n print('Printing Batch Transform output file content')\n output_file = open('./data/output/x_test.csv.out', 'r').read()\n print(output_file)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.average"
],
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.datasets.load_boston"
],
[
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
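The three SageMaker local-mode scripts above share the same scikit-learn/pandas preparation steps (`train_test_split`, `StandardScaler`, headerless CSVs); note that `load_boston`, used in the LightGBM script, has been removed from recent scikit-learn releases. A condensed, local-only sketch of the data preparation from the California-housing batch-transform script, with no SageMaker calls and illustrative paths and seed:

```python
import os

import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

data_set = fetch_california_housing()
X = pd.DataFrame(data_set.data, columns=data_set.feature_names)
y = pd.DataFrame(data_set.target)

# 2/3 train, 1/3 test; scale features with statistics fit on the training split only.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
scaler = StandardScaler().fit(x_train)
x_train, x_test = scaler.transform(x_train), scaler.transform(x_test)

for name, split in [('train/x_train.csv', x_train), ('test/x_test.csv', x_test)]:
    path = os.path.join('data', name)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    pd.DataFrame(split).to_csv(path, header=False, index=False)  # headerless CSVs, as in the scripts above
```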
tomstark99/play-fair
|
[
"5b4ad20ebb96d1162f3bd696aba0a6b57006ab0a"
] |
[
"src/models/components/consensus.py"
] |
[
"import torch.nn\nfrom torch import nn\n\n\nclass SegmentConsensus(torch.nn.Module):\n def __init__(self, consensus_type, dim=1):\n super().__init__()\n self.consensus_type = consensus_type\n self.dim = dim\n\n def forward(self, input_tensor):\n if self.consensus_type == \"avg\":\n output = input_tensor.mean(dim=self.dim, keepdim=True)\n elif self.consensus_type == \"identity\":\n output = input_tensor\n else:\n raise NotImplementedError(\"Only avg and identity consensus implemented\")\n return output\n\n\nclass AverageConsensus(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: tensor of shape :math:`(N, T, C)`\n\n Returns:\n Input tensor averaged over the time dimension of shape :math:`(N, C)`\n \"\"\"\n assert x.dim() == 3\n return x.mean(dim=1)\n\n\nclass ClassifierConsensus(nn.Module):\n def __init__(self, input_dim: int, output_dim: int, input_relu: bool = True,\n dropout: float = 0):\n super().__init__()\n self.classifier = nn.Linear(input_dim, output_dim)\n self.relu = nn.ReLU() if input_relu else None\n self.dropout = nn.Dropout(dropout)\n self.consensus = AverageConsensus()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.relu is not None:\n x = self.relu(x)\n x = self.dropout(x)\n x = self.classifier(x)\n return self.consensus(x)\n\n\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
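`AverageConsensus` in the play-fair row collapses per-segment class scores over the time dimension, and `ClassifierConsensus` applies ReLU, dropout, and a linear layer before that average. A small usage sketch with illustrative tensor sizes:

```python
import torch
from torch import nn

class AverageConsensus(nn.Module):
    """Collapse per-segment scores (N, T, C) to clip-level scores (N, C) by averaging over T."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert x.dim() == 3
        return x.mean(dim=1)

N, T, C = 4, 8, 174                       # batch, segments, classes -- illustrative sizes
features = torch.randn(N, T, 256)         # per-segment features

head = nn.Sequential(nn.ReLU(), nn.Dropout(0.5), nn.Linear(256, C))
segment_scores = head(features)           # (N, T, C): Linear acts on the last dimension
clip_scores = AverageConsensus()(segment_scores)
print(clip_scores.shape)                  # torch.Size([4, 174])
```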
sebamenabar/oc-fewshot-public
|
[
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"eb12bd5b426518fd8353304f0760f5c24f1b3c12",
"2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8",
"2dad8c9f24cb1bfe72d8b13b33d28f6788d86ca8"
] |
[
"fewshot/experiments/metrics.py",
"fewshot/models/modules/online_imp_memory.py",
"fewshot/data/iterators/semisupervised_episode_iterator_tests.py",
"fewshot/models/modules/gru.py",
"fewshot/models/nets/online_proto_sigmoid_net.py"
] |
[
"\"\"\"Metrics.\n\nAuthor: Mengye Ren ([email protected])\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport sklearn.metrics\n\n\ndef label_equal(pred, label, axis=-1):\n return pred == label.astype(pred.dtype)\n\n\ndef top1_correct(pred, label, axis=-1):\n \"\"\"Calculates top 1 correctness.\"\"\"\n assert pred.shape[0] == label.shape[0], '{} != {}'.format(\n pred.shape[0], label.shape[0])\n pred_idx = np.argmax(pred, axis=axis)\n return pred_idx == label.astype(pred_idx.dtype)\n\n\ndef top1_acc(pred, label, axis=-1):\n \"\"\"Calculates top 1 accuracy.\"\"\"\n return top1_correct(pred, label, axis=axis).mean()\n\n\ndef topk_acc(pred, label, k, axis=-1):\n \"\"\"Calculates top 5 accuracy.\"\"\"\n assert pred.shape[0] == label.shape[0], '{} != {}'.format(\n pred.shape[0], label.shape[0])\n topk_choices = np.argsort(pred, axis=axis)\n if len(topk_choices.shape) == 2:\n topk_choices = topk_choices[:, ::-1][:, :k]\n else:\n raise NotImplementedError()\n return np.sum(topk_choices == np.expand_dims(label, axis), axis=axis).mean()\n\n\ndef stderr(array, axis=0):\n \"\"\"Calculates standard error.\"\"\"\n if len(array) > 0:\n return array.std(axis=axis) / np.sqrt(float(array.shape[0]))\n else:\n return 0.0\n\n\ndef mean(array, axis=0):\n \"\"\"Calculates standard error.\"\"\"\n return array.mean(axis=axis) if len(array) > 0 else 0.0\n\n\ndef calc_ap(results_list, verbose=True):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n y_gt_list = []\n y_full_list = []\n pred_list = []\n score_list = []\n cat_list = []\n\n for r in results_list:\n flag = r['flag'].astype(np.bool)\n flag_ = np.expand_dims(flag, -1)\n gt_ = r['y_gt'][flag]\n pred_ = np.argmax(r['pred'][:, :, :-1], axis=-1)[flag]\n score_ = r['pred'][:, :, -1][flag] # Unknown score.\n\n y_gt_list.append(gt_)\n pred_list.append(pred_)\n score_list.append(score_) # Category agnostic\n\n if verbose:\n print('y_gt', y_gt_list[-1], y_gt_list[-1].shape)\n print('pred', pred_list[-1], pred_list[-1].shape)\n y_gt = np.concatenate(y_gt_list)\n score = np.concatenate(score_list)\n y_pred = np.concatenate(pred_list)\n\n N = len(y_gt)\n sortidx = np.argsort(score)\n score = score[sortidx]\n y_gt = y_gt[sortidx]\n y_pred = y_pred[sortidx]\n\n tp = (y_gt == y_pred).astype(np.float64)\n pos = (y_gt < unk_id).astype(np.float64)\n npos = pos.sum()\n\n if verbose:\n print('score sorted', score)\n print('y_gt', y_gt)\n print('y_pred', y_pred)\n print('tp', tp)\n print('unk id', unk_id)\n\n recall = np.zeros([N], dtype=np.float64)\n tp_cumsum = np.cumsum(tp)\n if verbose:\n print('npos', npos)\n print('tp cumsum', tp_cumsum)\n precision = tp_cumsum / np.arange(1, N + 1).astype(np.float64)\n recall = tp_cumsum / npos\n precision = np.concatenate([[1.0], precision])\n recall = np.concatenate([[0.0], recall])\n ap = sklearn.metrics.auc(recall, precision)\n if verbose:\n print('precision', precision)\n print('recall', recall)\n print('ap', ap)\n return ap\n\n\ndef calc_interval(y_gt, y=None):\n if y is None:\n y = y_gt\n B = y_gt.shape[0]\n # Last time we have seen a class.\n last_seen = np.zeros([B, y_gt.max() + 1]) - 1\n ninterval = np.zeros(y.shape, dtype=np.int64)\n for i in range(y.shape[1]):\n last_seen_ = last_seen[np.arange(B), y_gt[:, i]]\n ninterval[:, i] = i - last_seen_\n last_seen[np.arange(B), y_gt[:, i]] = i\n return ninterval\n\n\ndef calc_nshot(y_gt, y=None):\n if y is None:\n y = y_gt\n nway = np.max(y_gt)\n B, T = y_gt.shape\n waylist = np.arange(nway + 1)\n onehot_bool = 
np.expand_dims(y_gt, -1) == waylist\n onehot_bool_y = np.expand_dims(y, -1) == waylist\n onehot = (onehot_bool).astype(np.int64) # [B, T, K]\n onehot_cumsum = np.cumsum(onehot, axis=1)\n nshot = onehot_cumsum[onehot_bool_y].reshape([B, T]) - (y_gt == y).astype(\n np.int64)\n return nshot\n\n\ndef calc_nshot_ap(results_list, nshot_max):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n nshot_list = [calc_nshot(r['y_full']) for r in results_list]\n ap_list = [0.0] * nshot_max\n\n for n in range(1, nshot_max + 1):\n sel_list = [s == n for s in nshot_list]\n y_gt_list = [r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [r['flag'][s][None, :] for s, r in zip(sel_list, results_list)]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n ap_list[n - 1] = calc_ap(subresults, verbose=False)\n return np.array(ap_list)\n\n\ndef calc_acc(results_list):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n y_gt_list = []\n pred_list = []\n acc_list = []\n\n for r in results_list:\n flag = r['flag'].astype(np.bool)\n y_gt_list.append(r['y_gt'][flag])\n flag_ = np.expand_dims(flag, -1)\n pred_list.append(np.argmax(r['pred'][:, :, :-1], axis=-1)[flag])\n if len(y_gt_list[-1]) > 0:\n acc_list.append(\n np.mean((y_gt_list[-1] == pred_list[-1]).astype(np.float64)))\n y_gt = np.concatenate(y_gt_list)\n y_pred = np.concatenate(pred_list)\n correct = (y_pred == y_gt).astype(np.float64)\n return correct.mean(), stderr(np.array(acc_list))\n\n\ndef calc_nshot_acc(results_list, nshot_max, labeled=False):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n if labeled:\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n else:\n nshot_list = [calc_nshot(r['y_full']) for r in results_list]\n acc_list = [0.0] * nshot_max\n stderr_list = [0.0] * nshot_max\n for n in range(1, nshot_max + 1):\n sel_list = [s == n for s in nshot_list]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [r['flag'][s][None, :] for s, r in zip(sel_list, results_list)]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1], stderr_list[n - 1] = calc_acc(subresults)\n return np.array(acc_list), np.array(stderr_list)\n\n\ndef calc_nshot_acc_2d(results_list, nappear_max, nshot_max):\n \"\"\"Combining labeled and unlabeled. 
X-axis number of appearances, Y-axis\n number of labels.\"\"\"\n N = nappear_max\n M = nshot_max\n unk_id = results_list[0]['pred'].shape[-1] - 1\n acc_list = np.zeros([N, M])\n stderr_list = np.zeros([N, M])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n for n in range(1, N + 1):\n for m in range(1, M + 1):\n sel_list = [\n np.logical_and(nappear_ == n, nshot_ == m)\n for nappear_, nshot_ in zip(nappear_list, nshot_list)\n ]\n if m > n:\n assert all([np.logical_not(s).all() for s in sel_list])\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, m - 1], stderr_list[n - 1, m - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_3d(results_list, nappear_max, nshot_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n N = nappear_max\n M = nshot_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([N, M, K])\n stderr_list = np.zeros([N, M, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n for n in range(1, N + 1):\n for m in range(1, M + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n np.logical_and(nappear_ == n, nshot_ == m),\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k])) for nappear_,\n nshot_, ninterval_ in zip(nappear_list, nshot_list, ninterval_list)\n ]\n if m > n:\n assert all([np.logical_not(s).all() for s in sel_list])\n\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, m - 1, k - 1], stderr_list[n - 1, m - 1, k -\n 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_2dv2(results_list, nappear_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n N = nappear_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([N, K])\n stderr_list = np.zeros([N, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n # print(ninterval_list[0])\n for n in range(1, N + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n nappear_ == n,\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k]))\n for nappear_, ninterval_ in zip(nappear_list, ninterval_list)\n ]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k 
in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[n - 1, k - 1], stderr_list[n - 1, k - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_nshot_acc_2dv3(results_list, nshot_max, ninterval_split):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n M = nshot_max\n K = len(ninterval_split) - 1\n acc_list = np.zeros([M, K])\n stderr_list = np.zeros([M, K])\n nappear_list = [calc_nshot(r['y_full']) for r in results_list]\n nshot_list = [calc_nshot(r['y_s'], y=r['y_full']) for r in results_list]\n ninterval_list = [calc_interval(r['y_full']) for r in results_list]\n for m in range(1, M + 1):\n for k in range(1, K + 1):\n sel_list = [\n np.logical_and(\n nshot_ == m,\n np.logical_and(ninterval_ >= ninterval_split[k - 1],\n ninterval_ < ninterval_split[k]))\n for nshot_, ninterval_ in zip(nshot_list, ninterval_list)\n ]\n known_list = [r['y_gt'] < unk_id for r in results_list]\n sel_list = [np.logical_and(s, k) for s, k in zip(sel_list, known_list)]\n y_gt_list = [\n r['y_gt'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n pred_list = [\n r['pred'][s][None, :, :] for s, r in zip(sel_list, results_list)\n ]\n flag_list = [\n r['flag'][s][None, :] for s, r in zip(sel_list, results_list)\n ]\n subresults = [{\n 'y_gt': y,\n 'pred': p,\n 'flag': f\n } for y, p, f in zip(y_gt_list, pred_list, flag_list)]\n acc_list[m - 1, k - 1], stderr_list[m - 1, k - 1] = calc_acc(subresults)\n return acc_list, stderr_list\n\n\ndef calc_acc_time(results_list, tmax):\n acc_time = [] # [T, N] T=number of timestep; N=number of episodes\n for t in range(tmax):\n acc_time.append([])\n for i, r in enumerate(results_list):\n # Support set metrics, accumulate per time step.\n correct = label_equal(r['pred_id'], r['y_gt']) # [B, T]\n for t in range(tmax):\n if r['flag'][:, t].sum() > 0:\n acc_time[t].append(correct[:, t].sum() / r['flag'][:, t].sum())\n acc_time = [np.array(l) for l in acc_time]\n return np.array([mean(l) for l in acc_time]), np.array(\n [stderr(l) for l in acc_time])\n\n\ndef calc_acc_time_label(results_list, tmax):\n unk_id = results_list[0]['pred'].shape[-1] - 1\n acc_time = [] # [T, N] T=number of timestep; N=number of episodes\n for t in range(tmax):\n acc_time.append([])\n for i, r in enumerate(results_list):\n # Support set metrics, accumulate per time step.\n correct = label_equal(np.argmax(r['pred'][:, :, :-1], axis=-1),\n r['y_gt']) # [B, T]\n T = r['y_gt'].shape[1]\n flag = r['flag']\n is_unk = (r['y_gt'] == unk_id).astype(np.float32) # [B, T]\n flag = flag * (1.0 - is_unk)\n for t in range(tmax):\n if flag[:, t].sum() > 0:\n acc_time[t].append(correct[:, t].sum())\n acc_time = [np.array(l) for l in acc_time]\n return np.array([mean(l) for l in acc_time]), np.array(\n [stderr(l) for l in acc_time])\n\n\nif __name__ == '__main__':\n y_s = np.array([[1, 10, 3, 2, 10, 2, 3, 1, 2, 2, 2, 2]])\n y_full = np.array([[1, 2, 3, 2, 2, 2, 3, 1, 2, 2, 2, 2]])\n y_gt = np.array([[10, 10, 10, 10, 2, 2, 3, 1, 2, 2, 2, 2]])\n pred = np.array([[10, 10, 10, 10, 2, 2, 3, 1, 2, 3, 2, 2]])\n flag = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n pred2 = np.zeros([1, y_s.shape[1], 11])\n pred2[np.zeros([y_s.shape[1]], 
dtype=y_s.dtype),\n np.arange(y_s.shape[1]), pred[0]] = 1.0\n print(pred2)\n results_list = [{\n 'y_s': y_s,\n 'y_gt': y_gt,\n 'y_full': y_full,\n 'pred': pred2,\n 'flag': flag\n }]\n print(calc_nshot_acc(results_list, nshot_max=5, labeled=True))\n print(calc_nshot_acc(results_list, nshot_max=5))\n print(calc_ap(results_list, verbose=True))\n print(calc_nshot_acc_2d(results_list, nappear_max=5, nshot_max=5))\n print('interval', calc_interval(y_full))\n acc, se = calc_acc_time_label(results_list, tmax=12)\n print(acc)\n",
"\"\"\"Online mixture memory that performs online clustering.\n\nAuthor: Mengye Ren ([email protected])\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\n\nfrom fewshot.models.modules.example_memory import ExampleMemory\nfrom fewshot.models.registry import RegisterModule\n\nINF = 1e6\n\n\n@RegisterModule(\"online_imp_memory\")\n@RegisterModule(\"online_mixture_memory\") # Legacy name\nclass OnlineIMPMemory(ExampleMemory):\n\n def forward_one(self,\n x,\n y,\n t,\n cmean,\n clabel,\n cusage,\n is_training=tf.constant(True)):\n y_ = self.retrieve(x, t, cmean, clabel, cusage, is_training=is_training)\n # klogits, remain, new = self._infer_one(x, cmean, clabel, cusage, y=y)\n klogits, new = self._infer_one(x, cmean, clabel, cusage, y=y)\n new_id = tf.reduce_sum(tf.cast(tf.greater(cusage, 0), tf.int64),\n [1]) # [B]\n kidx = tf.where(tf.less(new, 0.0), tf.argmax(klogits, axis=1), new_id)\n cmean, clabel, cusage = self.store(x, kidx, y, t, cmean, clabel, cusage)\n return y_, (cmean, clabel, cusage)\n\n def retrieve(self,\n x,\n t,\n cmean,\n clabel,\n cusage,\n is_training=tf.constant(True)):\n # clogits, remain, new = self.infer(x, t, cmean, clabel, cusage)\n clogits, new = self.infer(x, t, cmean, clabel, cusage)\n new_ = tf.reshape(new, [-1, 1])\n pad = tf.zeros_like(clogits)[:, :-1] - INF\n # TODO use scatter_nd to assign unknown ID.\n logits_unk = tf.concat([pad, new_], axis=1)\n logits = tf.maximum(clogits, logits_unk)\n return logits\n\n def infer(self, x, t, cmean, clabel, cusage):\n \"\"\"Infer cluster ID. Either goes into one of the existing cluster\n or become a new cluster. This procedure is for prediction purpose.\n\n Args:\n x: Input. [B, D]\n cmean: Cluster centers. [B, K, D]\n clabel: Cluster labels. [B, K]\n cusage: Usage binary vector for the cluster. [B, K]\n\n Returns:\n logits: Cluster logits. [B, M]\n new_prob: New cluster probability. [B]\n \"\"\"\n # logits, remain, new = self._infer_one(x, cmean, clabel, cusage)\n logits, new = self._infer_one(x, cmean, clabel, cusage)\n kprob = tf.nn.softmax(logits) # [B, K]\n clabel_onehot = tf.one_hot(clabel, self.unknown_id + 1) # [B, K', C]\n # [B, K, 1] * [B, K, C] = [B, C]\n cprob = tf.reduce_sum(tf.expand_dims(kprob, -1) * clabel_onehot, [1])\n cprob = tf.maximum(cprob, 1e-6) # Delta.\n return tf.math.log(cprob), new\n\n def get_initial_state(self, bsize):\n \"\"\"Initial state for the RNN.\"\"\"\n M = self.max_items\n dim = self.dim\n\n # Cluster storage.\n cmean = tf.zeros([bsize, M, dim], dtype=self.dtype)\n clabel = tf.zeros([bsize, M], dtype=tf.int64)\n\n # Number of examples per cluster.\n cusage = tf.zeros([bsize, M], dtype=self.dtype)\n return cmean, clabel, cusage\n\n def _infer_one(self, x, cmean, clabel, cusage, y=None, verbose=False):\n \"\"\"Infers one example.\n\n Args:\n x: Input. [B, D]\n cmean: Cluster centers. [B, K, D]\n clabel: Cluster labels. [B, K]\n cusage: Usage binary vector for the cluster. [B, K]\n\n Returns:\n logits: Cluster logits. [B, M]\n remain: Old cluster logit. 
[B]\n \"\"\"\n # verbose = y is not None\n # verbose = False\n # Whether a cluster is used.\n cusage_flag = tf.greater(cusage, 0) # [B, K]\n\n # Returns cluster ID and label.\n x_ = tf.expand_dims(x, 1) # [B, 1, D]\n pdist = tf.squeeze(self.compute_euclidean_dist_sq(x_, cmean), 1) # [B, K]\n pdist += tf.where(cusage_flag, 0.0, INF)\n\n if y is not None:\n y_ = tf.expand_dims(tf.cast(y, clabel.dtype), -1) # [B]\n rel_flag = tf.logical_or(\n tf.equal(clabel, y_), tf.equal(clabel, self.unknown_id))\n pdist += tf.where(rel_flag, 0.0, INF)\n\n # Variance parameter.\n labeled_cluster = clabel < self.unknown_id\n sigma = tf.where(labeled_cluster, self.sigma_l, self.sigma_u)\n\n # Need to consider labeled case here.\n min_dist = tf.reduce_min(pdist, [-1]) # [B]\n # remain = (self._beta - min_dist) / self._gamma # [B]\n new = (min_dist - self._beta) / self._gamma # [B]\n pdist = pdist / (2.0 * sigma**2)\n return -pdist, new\n\n def store(self, x, kidx, y, t, cmean, clabel, cusage):\n \"\"\"Stores a new example.\n\n Args:\n x: Input. [B, ...].\n kidx: Cluster Idx. [B]\n y: Label. [B]\n t: Int. Timestep.\n cmean: [B, M, D].\n clabel: [B, M].\n cusage: [B, M].\n \"\"\"\n # Push into the example storage.\n bidx = tf.range(x.shape[0], dtype=tf.int64) # [B]\n bkidx = tf.stack([bidx, kidx], axis=-1) # [B, 2]\n # cusage_ = tf.cast(tf.expand_dims(cusage, -1), self.dtype) # [B, M, 1]\n\n cmean_cur = tf.gather_nd(cmean, bkidx) # [B, D]\n count = tf.gather_nd(cusage, bkidx) # [B]\n count_ = tf.expand_dims(count, -1) # [B]\n cmean_update = cmean_cur * count_ / (count_ + 1.0) + x / (count_ + 1.0)\n cmean_new = tf.tensor_scatter_nd_update(cmean, bkidx, cmean_update)\n\n cusage_update = count + 1\n cusage_new = tf.tensor_scatter_nd_update(cusage, bkidx, cusage_update)\n\n clabel_cur = tf.gather_nd(clabel, bkidx) # [B]\n clabel_cur = tf.where(tf.greater(count, 0), clabel_cur, self.unknown_id)\n # Prefer labeled vs. unlabeled.\n clabel_upd = tf.minimum(clabel_cur, tf.cast(y, clabel_cur.dtype))\n clabel_new = tf.tensor_scatter_nd_update(clabel, bkidx, clabel_upd)\n return cmean_new, clabel_new, cusage_new\n",
"\"\"\"Unit tests for semi-supervised episode iterator.\n\nAuthor: Mengye Ren ([email protected])\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\nimport unittest\n\nfrom fewshot.data.datasets.omniglot import OmniglotDataset\nfrom fewshot.data.iterators.semisupervised_episode_iterator import SemiSupervisedEpisodeIterator # NOQA\nfrom fewshot.data.preprocessors import NormalizationPreprocessor\nfrom fewshot.data.samplers.crp_sampler import CRPSampler\nfrom fewshot.data.samplers.semisupervised_episode_sampler import SemiSupervisedEpisodeSampler # NOQA\n\n\nclass SemiSupervisedEpisodeIteratorTests(unittest.TestCase):\n\n def test_basic(self):\n folder = '/mnt/local/data/omniglot'\n omniglot = OmniglotDataset(folder, 'train')\n preprocessor = NormalizationPreprocessor()\n for bsize in [1, 2]:\n sampler = CRPSampler(0)\n sampler2 = SemiSupervisedEpisodeSampler(sampler, 0)\n it = SemiSupervisedEpisodeIterator(\n omniglot,\n sampler2,\n batch_size=bsize,\n nclasses=10,\n nquery=5,\n preprocessor=preprocessor,\n expand=True,\n fix_unknown=True,\n label_ratio=0.5,\n nd=5,\n sd=1,\n md=2,\n alpha=0.5,\n theta=1.0)\n for x in range(2):\n b = it.next()\n print(b)\n print('support', tf.reduce_max(b.train_images),\n tf.reduce_min(b.train_images), tf.shape(b.train_images))\n print('support label', b.train_labels, tf.shape(b.train_labels))\n print('support gt', b.train_groundtruth, tf.shape(b.train_groundtruth))\n print('query', tf.reduce_max(b.test_images),\n tf.reduce_min(b.test_images), tf.shape(b.test_images))\n print('query label', b.test_labels, tf.shape(b.test_labels))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\n\nfrom fewshot.models.registry import RegisterModule\nfrom fewshot.models.modules.container_module import ContainerModule\nfrom fewshot.models.modules.nnlib import Linear\nfrom fewshot.models.modules.layer_norm import LayerNorm\nfrom fewshot.models.variable_context import variable_scope\n\n\n@RegisterModule('gru')\nclass GRU(ContainerModule):\n \"\"\"Gated recurrent unit\"\"\"\n\n def __init__(self, name, nin, nout, dtype=tf.float32):\n super(GRU, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n\n with variable_scope(name):\n self._gates = Linear(\"gates_linear\", nin + nout, 2 * nout)\n self._linear = Linear(\"linear\", nin + nout, nout)\n\n def forward(self, x, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n h_last: [B, D]. Hidden states of the previous timestep.\n\n Returns:\n h\n \"\"\"\n D = self.nout\n x_comb = tf.concat([x, h_last], axis=-1)\n gates = self._gates(x_comb)\n r_gate = tf.math.sigmoid(gates[:, :D])\n z_gate = tf.math.sigmoid(gates[:, D:])\n h_hat = tf.math.tanh(self._linear(tf.concat([x, h_last * r_gate])))\n h = (1.0 - z_gate) * h_hat + z_gate * h_hat\n return h\n\n def end_iteration(self, h_last):\n return h_last\n\n def get_initial_state(self, bsize):\n return tf.zeros([bsize, self.nout], dtype=self.dtype)\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n\n\n@RegisterModule('gru1dmod')\nclass GRU1DMod(GRU):\n \"\"\"GRU with 1-d gates and without activation\"\"\"\n\n def __init__(self, name, nin, nout, layernorm=False, bias_init=-2.0, dtype=tf.float32):\n super(GRU, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n self._layernorm = layernorm\n self._gates = Linear(\n \"gates_linear\", nin + nout, 1, b_init=lambda: tf.ones(1) * bias_init)\n # self._gates = Linear(\n # \"gates_linear\", nin + nout, 1, b_init=lambda: tf.ones(1) * 2.0)\n # self._gates = Linear(\n # \"gates_linear\", nin + nout, 1, b_init=lambda: tf.zeros(1))\n if layernorm:\n self._ln = LayerNorm(\"layernorm\", nin + nout, dtype=dtype)\n # assert False\n\n def forward(self, x, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n h_last: [B, D]. 
Hidden states of the previous timestep.\n\n Returns:\n h\n \"\"\"\n x_comb = tf.concat([x, h_last], axis=-1)\n if self._layernorm:\n x_comb = self._ln(x_comb)\n gates = self._gates(x_comb)\n f_gate = tf.math.sigmoid(gates)\n # tf.print('f gate', f_gate)\n h = (1.0 - f_gate) * h_last + f_gate * x\n return h, h\n\n def end_iteration(self, h_last):\n return h_last\n\n def get_initial_state(self, bsize):\n return tf.zeros([bsize, self.nout], dtype=self.dtype)\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n\n\n@RegisterModule('lstm1dmod')\nclass LSTM1DMod(ContainerModule):\n \"\"\"A standard LSTM module.\"\"\"\n\n def __init__(self, name, nin, nout, dtype=tf.float32):\n super(LSTM1DMod, self).__init__(dtype=dtype)\n self._nin = nin\n self._nout = nout\n\n with variable_scope(name):\n self._gates = Linear(\"gates_linear\", nin + nout, nout + 2)\n # self._gates2 = Linear(\"gates_linear\", nout, nout)\n\n def forward(self, x, c_last, h_last):\n \"\"\"Forward one timestep.\n\n Args:\n x: [B, D]. Input.\n c_last: [B, D]. Cell states of the previous time step.\n h_last: [B, D]. Hidden states of the previous time step.\n\n Returns:\n A tuple of output and the hidden states.\n \"\"\"\n x_comb = tf.concat([x, h_last], axis=-1)\n gates = self._gates(x_comb)\n D = self.nout\n f_gate = tf.sigmoid(gates[:, :1])\n i_gate = tf.sigmoid(gates[:, 1:2])\n # o_gate = tf.sigmoid(gates[:, 2:2 + D])\n o_gate = tf.sigmoid(gates[:, 2:3])\n # c = c_last * f_gate + x * i_gate\n c = c_last * f_gate + x * (1 - f_gate)\n h = o_gate * tf.tanh(c)\n # h = tf.tanh(c2)\n return h, (c, h)\n\n def end_iteration(self, h_last):\n \"\"\"End recurrent iterations.\"\"\"\n return h_last\n\n def get_initial_state(self, bsize):\n return (tf.zeros([bsize, self.nout], dtype=self.dtype),\n tf.zeros([bsize, self.nout], dtype=self.dtype))\n\n @property\n def nin(self):\n return self._nin\n\n @property\n def nout(self):\n return self._nout\n\n @property\n def in_dim(self):\n return self._nin\n\n @property\n def memory_dim(self):\n return self._nout\n",
"\"\"\"Online prototypical network. This one uses sigmoid probability to indicate\nunknowns.\n\nAuthor: Mengye Ren ([email protected])\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport tensorflow as tf\n\nfrom fewshot.models.nets.episode_recurrent_sigmoid_net import EpisodeRecurrentSigmoidNet # NOQA\nfrom fewshot.models.registry import RegisterModel\nfrom fewshot.utils.logger import get as get_logger\n\nlog = get_logger()\n\n\n@RegisterModel(\"online_proto_sigmoid_net\")\n@RegisterModel(\"proto_mem_sigmoid_net\") # Legacy name\nclass OnlineProtoSigmoidNet(EpisodeRecurrentSigmoidNet):\n \"\"\"A memory network that keeps updating the prototypes.\"\"\"\n\n def __init__(self,\n config,\n backbone,\n memory,\n distributed=False,\n dtype=tf.float32):\n super(OnlineProtoSigmoidNet, self).__init__(\n config, backbone, distributed=distributed, dtype=dtype)\n self._memory = memory\n\n def forward(self,\n x,\n y,\n s=None,\n x_test=None,\n is_training=tf.constant(True),\n backbone_is_training=None,\n last_is_training=None,\n **kwargs):\n \"\"\"Make a forward pass.\n Args:\n x: [B, T, ...]. Support examples at each timestep.\n y: [B, T]. Support labels at each timestep, note that the label is not\n revealed until the next step.\n\n Returns:\n y_pred: [B, T, K+1], Logits at each timestep.\n \"\"\"\n B = tf.constant(x.shape[0])\n T = tf.constant(x.shape[1])\n if backbone_is_training is None:\n backbone_is_training = is_training\n log.info(f\"Backbone is training: {backbone_is_training}\")\n h = self.run_backbone(x, is_training=backbone_is_training, last_is_training=last_is_training)\n y_pred = tf.TensorArray(self.dtype, size=T)\n states = self.memory.get_initial_state(h.shape[0])\n\n if self.config.ssl_store_schedule:\n log.info(\"Using probabilistic semisupervised store schedule\")\n store_prob = tf.compat.v1.train.piecewise_constant(\n self._step, [2000, 4000, 6000, 8000, 10000],\n [0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\n ssl_store = tf.less(tf.random.uniform([B, T], 0.0, 1.0), store_prob)\n self._ssl_store = ssl_store\n else:\n ssl_store = tf.ones([B, T], dtype=tf.bool)\n\n for t in tf.range(T):\n x_ = self.slice_time(h, t) # [B, ...]\n y_ = self.slice_time(y, t) # [B]\n if s is None:\n s_ = None\n else:\n s_ = self.slice_time(s, t)\n y_pred_, states = self.memory.forward_one(\n x_,\n y_,\n t,\n *states,\n s=s_,\n add_new=tf.constant(True),\n is_training=is_training,\n ssl_store=ssl_store[:, t])\n y_pred = y_pred.write(t, y_pred_)\n y_pred = tf.transpose(y_pred.stack(), [1, 0, 2])\n if x_test is not None:\n x_test = self.run_backbone(x_test, is_training=is_training) # [B, N, D]\n y_test_pred = self.memory.retrieve_all(\n x_test, add_new=tf.constant(False))\n return y_pred, y_test_pred\n else:\n return y_pred\n\n @property\n def memory(self):\n \"\"\"Memory module\"\"\"\n return self._memory\n\n\nfrom fewshot.models.nets.episode_recurrent_sigmoid_trunc_net import EpisodeRecurrentSigmoidTruncNet # NOQA\n\n\n@RegisterModel(\"proto_mem_sigmoid_trunc_net\")\nclass ProtoMemSigmoidTruncNet(EpisodeRecurrentSigmoidTruncNet):\n \"\"\"A memory network that keeps updating the prototypes.\"\"\"\n\n def __init__(self,\n config,\n backbone,\n memory,\n distributed=False,\n dtype=tf.float32):\n super(ProtoMemSigmoidTruncNet, self).__init__(\n config, backbone, distributed=distributed, dtype=dtype)\n self._memory = memory\n\n def forward(self,\n x,\n y,\n t0,\n dt,\n *states,\n is_training=tf.constant(True),\n **kwargs):\n \"\"\"Make a forward pass.\n Args:\n x: [B, T, 
...]. Support examples at each timestep.\n y: [B, T]. Support labels at each timestep, note that the label is not\n revealed until the next step.\n\n Returns:\n y_pred: [B, T, K+1], Logits at each timestep.\n \"\"\"\n h = self.run_backbone(x, is_training=is_training)\n y_pred = tf.TensorArray(self.dtype, size=dt)\n if len(states) == 0:\n states = self.memory.get_initial_state(tf.shape(h)[0])\n cold_start = True\n else:\n cold_start = False\n for t in tf.range(dt):\n x_ = self.slice_time(h, t) # [B, ...]\n y_ = self.slice_time(y, t) # [B]\n y_pred_, states = self.memory.forward_one(\n x_,\n y_,\n t,\n *states,\n add_new=tf.constant(True),\n is_training=is_training)\n y_pred = y_pred.write(t, y_pred_)\n y_pred = tf.transpose(y_pred.stack(), [1, 0, 2])\n if cold_start:\n return y_pred\n else:\n return y_pred, states\n\n @property\n def memory(self):\n \"\"\"Memory module\"\"\"\n return self._memory\n"
] |
[
[
"numpy.logical_not",
"numpy.expand_dims",
"numpy.logical_and",
"numpy.arange",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.tensor_scatter_nd_update",
"tensorflow.where",
"tensorflow.greater",
"tensorflow.argmax",
"tensorflow.gather_nd",
"tensorflow.less",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.reduce_min"
],
[
"tensorflow.reduce_max",
"tensorflow.reduce_min",
"tensorflow.shape"
],
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.math.sigmoid",
"tensorflow.tanh"
],
[
"tensorflow.constant",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.TensorArray",
"tensorflow.random.uniform",
"tensorflow.ones",
"tensorflow.compat.v1.train.piecewise_constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
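In `metrics.py` from the oc-fewshot row, `calc_nshot` counts how many times each example's class has already appeared by building a one-hot tensor and taking a cumulative sum over time. A standalone sketch of that trick for the unlabeled case (`y is None`), with a tiny worked example:

```python
import numpy as np

def calc_nshot(y_gt: np.ndarray) -> np.ndarray:
    """Prior occurrences of each label at every timestep: [B, T] -> [B, T]."""
    B, T = y_gt.shape
    waylist = np.arange(y_gt.max() + 1)
    onehot_bool = np.expand_dims(y_gt, -1) == waylist          # [B, T, K]
    cumsum = np.cumsum(onehot_bool.astype(np.int64), axis=1)   # counts up to and including t
    return cumsum[onehot_bool].reshape([B, T]) - 1             # exclude the current timestep

y = np.array([[0, 1, 0, 2, 1, 0]])
print(calc_nshot(y))  # [[0 0 1 0 1 2]] -- e.g. the final 0 has been seen twice before
```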
exmee/HSSD
|
[
"cf1d26c32b1a5a95c6c17460dda445c408d7b5dc",
"cf1d26c32b1a5a95c6c17460dda445c408d7b5dc"
] |
[
"resnet_v1.py",
"Conv2DWN.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the original form of Residual Networks.\n\nThe 'v1' residual networks (ResNets) implemented in this module were proposed\nby:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nOther variants were introduced in:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe networks defined in this module utilize the bottleneck building block of\n[1] with projection shortcuts only for increasing depths. They employ batch\nnormalization *after* every weight layer. This is the architecture used by\nMSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and\nResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'\narchitecture and the alternative 'v2' architecture of [2] which uses batch\nnormalization *before* every weight layer in the so-called full pre-activation\nunits.\n\nTypical use:\n\n from tensorflow.contrib.slim.nets import resnet_v1\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):\n net, end_points = resnet_v1.resnet_v1_101(inputs, 1000)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):\n net, end_points = resnet_v1.resnet_v1_101(inputs,\n 21,\n global_pool=False,\n output_stride=16)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n#import Conv2DWN\n\nimport resnet_utils\nfrom blocks import attention_block\nfrom config import args\n\nresnet_arg_scope = resnet_utils.resnet_arg_scope\nslim = tf.contrib.slim\n\n'''分割部分和跳跃连接层中使用attention,检测为0.789'''\[email protected]_arg_scope\ndef tail_att(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #skip = attention_block(skip)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method) #res_inpt为input经过插值处理\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n '''********************attention****************************'''\n #output = attention_block(output)\n '''********************attention****************************'''\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\[email protected]_arg_scope #attention_layer\ndef attention_layer(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n output = attention_block(output)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\[email protected]_arg_scope #attention\ndef attention(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n skip = attention_block(skip)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n#定义像素混洗\[email protected]_arg_scope\ndef sub_pixel(inputs, skip, depth, outputs_collections=None, scale=2,scope=None):\n if args.resize == 'bilinear':\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest':\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'sub_pixel', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth != depth_in:\n inputs = slim.conv2d(inputs, depth, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope='shortcut')\n\n inputs = slim.conv2d(inputs, depth*scale*scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n output = tf.depth_to_space(inputs, scale)\n output = tf.image.resize_images(output, tf.shape(skip)[1:3], method=resize_method)\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\[email protected]_arg_scope #shuffle pixel\ndef sub_pixel_skip(inputs, skip, depth, depth_bottleneck, scale=2, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n res_inpt = slim.conv2d(inputs, depth * scale * scale, [1, 1], stride=1, padding='SAME',\n activation_fn=None, scope=scope)\n\n res_inpt = tf.depth_to_space(res_inpt, scale)\n res_inpt = tf.image.resize_images(res_inpt, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\[email protected]_arg_scope #不加concat层\ndef noconcat(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear':\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest':\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method)\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut')\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n #concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(res_inpt, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\[email protected]_arg_scope\ndef bottleneck_skip(inputs, skip, depth, depth_bottleneck, stride=1, rate=1,\n outputs_collections=None, scope=None):\n assert stride == 1\n if args.resize == 'bilinear': #双线性插值\n resize_method = tf.image.ResizeMethod.BILINEAR\n if args.resize == 'nearest': #最近邻插值\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n with tf.variable_scope(scope, 'bottleneck_skip', [inputs, skip]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n #skip = attention_block(skip)\n res_inpt = tf.image.resize_images(inputs, tf.shape(skip)[1:3], method=resize_method) #res_inpt为input经过插值处理\n if depth != depth_in: #\n shortcut = slim.conv2d(res_inpt, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut') #第一次resskip时,2048 != 512,要把shortcut的深度变为512\n else:\n shortcut = res_inpt\n\n # print(\"Live from skip bottleneck block! 
We got %s as input and %s as skip connection\" % (inputs.get_shape(), skip.get_shape()))\n concat = tf.concat([res_inpt, skip], 3)\n residual = slim.conv2d(concat, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n # print(\"So far in the end of bottleneck skip we have %s\" % (output.get_shape()))\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n\[email protected]_arg_scope\ndef bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,\n outputs_collections=None, scope=None):\n\n \"\"\"Bottleneck residual unit variant with BN after convolutions.\n\n This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for\n its definition. Note that we use here the bottleneck variant which has an\n extra bottleneck layer.\n\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth == depth_in:\n shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut')\n\n # residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,\n # scope='conv1')\n # residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,\n # rate=rate, scope='conv2')\n\n residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=stride,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n\n return slim.utils.collect_named_outputs(outputs_collections, sc.name,\n output)\n\n\ndef resnet_v1(inputs,\n blocks,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n reuse=None,\n scope=None):\n \"\"\"Generator for v1 ResNet models.\n\n This function generates a family of ResNet v1 models. See the resnet_v1_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. 
In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks. If None\n we return the features before the logit layer.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is None, then\n net is the output of the last ResNet block, potentially after global\n average pooling. 
If num_classes is not None, net contains the pre-softmax\n activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.conv2d, bottleneck,\n resnet_utils.stack_blocks_dense],\n outputs_collections=end_points_collection):\n net = inputs\n if include_root_block:\n if output_stride is not None:\n if output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n output_stride /= 4\n net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')\n net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)\n pooled_features = net\n if num_classes is not None:\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='logits')\n # Convert end_points_collection into a dictionary of end_points.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if global_pool:\n end_points['pooled'] = slim.flatten(pooled_features, scope='pool_output')\n if num_classes is not None:\n end_points['predictions'] = slim.softmax(net, scope='predictions')\n return net, end_points\nresnet_v1.default_image_size = 224\n\n\ndef resnet_v1_50(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_50'):\n \"\"\"ResNet-50 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_101(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_101'):\n \"\"\"ResNet-101 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_152(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_152'):\n \"\"\"ResNet-152 model of [1]. 
See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_200(inputs,\n num_classes=None,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_200'):\n \"\"\"ResNet-200 model of [2]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, global_pool, output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n",
"import tensorflow as tf\n\n\nclass Conv2DWeightNorm(tf.layers.Conv2D):\n\n def build(self, input_shape):\n self.wn_g = self.add_weight(\n name='wn_g',\n shape=(self.filters,),\n dtype=self.dtype,\n initializer=tf.initializers.ones,\n trainable=True,\n )\n super(Conv2DWeightNorm, self).build(input_shape)\n square_sum = tf.reduce_sum(\n tf.square(self.kernel), [0, 1, 2], keepdims=False)\n inv_norm = tf.rsqrt(square_sum)\n self.kernel = self.kernel * (inv_norm * self.wn_g)\n\n\ndef conv2d_weight_norm(inputs,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format='channels_last',\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n reuse=None):\n layer = Conv2DWeightNorm(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n trainable=trainable,\n name=name,\n dtype=inputs.dtype.base_dtype,\n _reuse=reuse,\n _scope=name)\n return layer.apply(inputs)"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.depth_to_space",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.variable_scope"
],
[
"tensorflow.rsqrt",
"tensorflow.zeros_initializer",
"tensorflow.square"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
collector-m/LiDAR-MOS
|
[
"7ccbb63b4ee7c40195b35dd0dddd71473fae25b1",
"7ccbb63b4ee7c40195b35dd0dddd71473fae25b1"
] |
[
"utils/auxiliary/filelist2files.py",
"utils/gen_residual_images.py"
] |
[
"#!/usr/bin/python3\n\nimport os\nimport sys\nimport shutil\nimport numpy as np\nimport scipy.io as sio\n\nfrom tqdm import tqdm\n\ndef pack(array):\n \"\"\" convert a boolean array into a bitwise array. \"\"\"\n array = array.reshape((-1))\n\n #compressing bit flags.\n # yapf: disable\n compressed = array[::8] << 7 | array[1::8] << 6 | array[2::8] << 5 | array[3::8] << 4 | array[4::8] << 3 | array[5::8] << 2 | array[6::8] << 1 | array[7::8]\n # yapf: enable\n\n return np.array(compressed, dtype=np.uint8)\n\nif __name__ == \"__main__\":\n \"\"\"\n Convert a given directory of mat files and the given filelist into a separate directory\n containing the files in the file list.\n \"\"\"\n\n if len(sys.argv) < 2:\n print(\"./filelist2files.py <input-root-directory> <output-root-directory> [<filelist>]\")\n exit(1)\n\n src_dir = sys.argv[1]\n dst_dir = sys.argv[2]\n\n files = None\n\n if len(sys.argv) > 3:\n files = [line.strip().split(\"_\") for line in open(sys.argv[3])]\n else:\n\n seq_dirs = [d for d in os.listdir(src_dir) if os.path.isdir(os.path.join(src_dir, d))]\n files = []\n for d in seq_dirs:\n files.extend([(d, os.path.splitext(f)[0]) for f in os.listdir(os.path.join(src_dir, d, \"input\"))])\n\n print(\"Processing {} files.\".format(len(files)))\n\n for seq_dir, filename in tqdm(files):\n\n if os.path.exists(os.path.join(src_dir, seq_dir, \"input\", filename + \".mat\")):\n data = sio.loadmat(os.path.join(src_dir, seq_dir, \"input\", filename + \".mat\"))\n\n out_dir = os.path.join(dst_dir, seq_dir, \"voxels\")\n os.makedirs(out_dir, exist_ok=True)\n\n compressed = pack(data[\"voxels\"])\n compressed.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".bin\"))\n\n if os.path.exists(os.path.join(src_dir, seq_dir, \"target_gt\", filename + \".mat\")):\n data = sio.loadmat(os.path.join(src_dir, seq_dir, \"target_gt\", filename + \".mat\"))\n\n out_dir = os.path.join(dst_dir, seq_dir, \"voxels\")\n os.makedirs(out_dir, exist_ok=True)\n\n labels = data[\"voxels\"].astype(np.uint16)\n labels.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".label\"))\n\n occlusions = pack(data[\"occluded\"])\n occlusions.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".occluded\"))\n\n invalid = pack(data[\"invalid\"])\n invalid.tofile(os.path.join(out_dir, os.path.splitext(filename)[0] + \".invalid\"))\n",
"#!/usr/bin/env python3\n# Developed by Xieyuanli Chen\n# This file is covered by the LICENSE file in the root of this project.\n# Brief: This script generates residual images\n\nimport os\nimport sys\nimport yaml\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom utils import load_poses, load_calib, load_files, load_vertex\n\ntry:\n from c_gen_virtual_scan import gen_virtual_scan as range_projection\nexcept:\n print(\"Using clib by $export PYTHONPATH=$PYTHONPATH:<path-to-library>\")\n print(\"Currently using python-lib to generate range images.\")\n from utils import range_projection\n\n\nif __name__ == '__main__':\n # load config file\n config_filename = 'config/data_preparing.yaml'\n if len(sys.argv) > 1:\n config_filename = sys.argv[1]\n \n if yaml.__version__ >= '5.1':\n config = yaml.load(open(config_filename), Loader=yaml.FullLoader)\n else:\n config = yaml.load(open(config_filename))\n \n # specify parameters\n num_frames = config['num_frames']\n debug = config['debug']\n normalize = config['normalize']\n num_last_n = config['num_last_n']\n visualize = config['visualize']\n visualization_folder = config['visualization_folder']\n \n # specify the output folders\n residual_image_folder = config['residual_image_folder']\n if not os.path.exists(residual_image_folder):\n os.makedirs(residual_image_folder)\n \n if visualize:\n if not os.path.exists(visualization_folder):\n os.makedirs(visualization_folder)\n \n # load poses\n pose_file = config['pose_file']\n poses = np.array(load_poses(pose_file))\n inv_frame0 = np.linalg.inv(poses[0])\n \n # load calibrations\n calib_file = config['calib_file']\n T_cam_velo = load_calib(calib_file)\n T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))\n T_velo_cam = np.linalg.inv(T_cam_velo)\n \n # convert kitti poses from camera coord to LiDAR coord\n new_poses = []\n for pose in poses:\n new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))\n poses = np.array(new_poses)\n \n # load LiDAR scans\n scan_folder = config['scan_folder']\n scan_paths = load_files(scan_folder)\n \n # test for the first N scans\n if num_frames >= len(poses) or num_frames <= 0:\n print('generate training data for all frames with number of: ', len(poses))\n else:\n poses = poses[:num_frames]\n scan_paths = scan_paths[:num_frames]\n \n range_image_params = config['range_image']\n \n # generate residual images for the whole sequence\n for frame_idx in tqdm(range(len(scan_paths))):\n file_name = os.path.join(residual_image_folder, str(frame_idx).zfill(6))\n diff_image = np.full((range_image_params['height'], range_image_params['width']), 0,\n dtype=np.float32) # [H,W] range (0 is no data)\n \n # for the first N frame we generate a dummy file\n if frame_idx < num_last_n:\n np.save(file_name, diff_image)\n \n else:\n # load current scan and generate current range image\n current_pose = poses[frame_idx]\n current_scan = load_vertex(scan_paths[frame_idx])\n current_range = range_projection(current_scan.astype(np.float32),\n range_image_params['height'], range_image_params['width'],\n range_image_params['fov_up'], range_image_params['fov_down'],\n range_image_params['max_range'], range_image_params['min_range'])[:, :, 3]\n \n # load last scan, transform into the current coord and generate a transformed last range image\n last_pose = poses[frame_idx - num_last_n]\n last_scan = load_vertex(scan_paths[frame_idx - num_last_n])\n last_scan_transformed = np.linalg.inv(current_pose).dot(last_pose).dot(last_scan.T).T\n last_range_transformed = 
range_projection(last_scan_transformed.astype(np.float32),\n range_image_params['height'], range_image_params['width'],\n range_image_params['fov_up'], range_image_params['fov_down'],\n range_image_params['max_range'], range_image_params['min_range'])[:, :, 3]\n \n # generate residual image\n valid_mask = (current_range > range_image_params['min_range']) & \\\n (current_range < range_image_params['max_range']) & \\\n (last_range_transformed > range_image_params['min_range']) & \\\n (last_range_transformed < range_image_params['max_range'])\n difference = np.abs(current_range[valid_mask] - last_range_transformed[valid_mask])\n \n if normalize:\n difference = np.abs(current_range[valid_mask] - last_range_transformed[valid_mask]) / current_range[valid_mask]\n\n diff_image[valid_mask] = difference\n \n if debug:\n fig, axs = plt.subplots(3)\n axs[0].imshow(last_range_transformed)\n axs[1].imshow(current_range)\n axs[2].imshow(diff_image, vmin=0, vmax=10)\n plt.show()\n \n if visualize:\n fig = plt.figure(frameon=False, figsize=(16, 10))\n fig.set_size_inches(20.48, 0.64)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(diff_image, vmin=0, vmax=1)\n image_name = os.path.join(visualization_folder, str(frame_idx).zfill(6))\n plt.savefig(image_name)\n plt.close()\n\n # save residual image\n np.save(file_name, diff_image)\n"
] |
[
[
"numpy.array"
],
[
"numpy.abs",
"numpy.linalg.inv",
"numpy.asarray",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.subplots",
"numpy.save",
"numpy.full",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
artursbm/fuzzy-logic
|
[
"79a4879deb7b09b4738b0c82234506b8ab1b0392"
] |
[
"fuzzy_c_means/main_fcm_validation.py"
] |
[
"# Artur Mello\n# Fuzzy C Means - Algorithm validation and performance analysis\n# TP 1 - Sistemas Nebulosos\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import io\nfrom fuzzy_c_means import fuzzy_c_means\n\n\ndef main():\n k = 4\n samples = np.asarray(io.loadmat(\"fcm_dataset.mat\")[\"x\"])\n avg_iterations = 0\n reps = 100\n\n for i in range(reps):\n\n samples, centroids, data_clusters, iterations = fuzzy_c_means(samples, k)\n avg_iterations += iterations\n\n plt.scatter(samples[:,0], samples[:, 1], c=data_clusters[:, 0])\n plt.scatter(centroids[:,0], centroids[:, 1], c='red')\n plt.title('Amostras Categorizadas')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.savefig('teste_fcm.png')\n plt.show()\n print(\"Convergência alcançada, em média, em {} iterações\".format(avg_iterations/reps))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"scipy.io.loadmat",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jancervenka/pandas
|
[
"b2ebd5ae14580dde793e40097c6a283d82c69ad9"
] |
[
"pandas/conftest.py"
] |
[
"from collections import abc\nfrom datetime import date, time, timedelta, timezone\nfrom decimal import Decimal\nimport operator\nimport os\n\nfrom dateutil.tz import tzlocal, tzutc\nimport hypothesis\nfrom hypothesis import strategies as st\nimport numpy as np\nimport pytest\nfrom pytz import FixedOffset, utc\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.indexes.api import Index, MultiIndex\n\nhypothesis.settings.register_profile(\n \"ci\",\n # Hypothesis timing checks are tuned for scalars by default, so we bump\n # them from 200ms to 500ms per test case as the global default. If this\n # is too short for a specific test, (a) try to make it faster, and (b)\n # if it really is slow add `@settings(deadline=...)` with a working value,\n # or `deadline=None` to entirely disable timeouts for that test.\n deadline=500,\n suppress_health_check=(hypothesis.HealthCheck.too_slow,),\n)\nhypothesis.settings.load_profile(\"ci\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\", help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\", help=\"skip network tests\")\n parser.addoption(\"--skip-db\", action=\"store_true\", help=\"skip db tests\")\n parser.addoption(\n \"--run-high-memory\", action=\"store_true\", help=\"run high memory tests\"\n )\n parser.addoption(\"--only-slow\", action=\"store_true\", help=\"run only slow tests\")\n parser.addoption(\n \"--strict-data-files\",\n action=\"store_true\",\n help=\"Fail if a test is skipped for missing data file.\",\n )\n\n\ndef pytest_runtest_setup(item):\n if \"slow\" in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if \"slow\" not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if \"network\" in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if \"db\" in item.keywords and item.config.getoption(\"--skip-db\"):\n pytest.skip(\"skipping due to --skip-db\")\n\n if \"high_memory\" in item.keywords and not item.config.getoption(\n \"--run-high-memory\"\n ):\n pytest.skip(\"skipping high memory test since --run-high-memory was not set\")\n\n\[email protected](autouse=True)\ndef configure_tests():\n \"\"\"\n Configure settings for all tests and test modules.\n \"\"\"\n pd.set_option(\"chained_assignment\", \"raise\")\n\n\[email protected](autouse=True)\ndef add_imports(doctest_namespace):\n \"\"\"\n Make `np` and `pd` names available for doctests.\n \"\"\"\n doctest_namespace[\"np\"] = np\n doctest_namespace[\"pd\"] = pd\n\n\[email protected](params=[\"bsr\", \"coo\", \"csc\", \"csr\", \"dia\", \"dok\", \"lil\"])\ndef spmatrix(request):\n \"\"\"\n Yields scipy sparse matrix classes.\n \"\"\"\n from scipy import sparse\n\n return getattr(sparse, request.param + \"_matrix\")\n\n\[email protected](params=[0, 1, \"index\", \"columns\"], ids=lambda x: f\"axis {repr(x)}\")\ndef axis(request):\n \"\"\"\n Fixture for returning the axis numbers of a DataFrame.\n \"\"\"\n return request.param\n\n\naxis_frame = axis\n\n\[email protected](params=[0, \"index\"], ids=lambda x: f\"axis {repr(x)}\")\ndef axis_series(request):\n \"\"\"\n Fixture for returning the axis numbers of a Series.\n \"\"\"\n return request.param\n\n\[email protected]\ndef ip():\n \"\"\"\n Get an instance of 
IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n pytest.importorskip(\"IPython\", minversion=\"6.0.0\")\n from IPython.core.interactiveshell import InteractiveShell\n\n return InteractiveShell()\n\n\[email protected](params=[True, False, None])\ndef observed(request):\n \"\"\"\n Pass in the observed keyword to groupby for [True, False]\n This indicates whether categoricals should return values for\n values which are not in the grouper [False / None], or only values which\n appear in the grouper [True]. [None] is supported for future compatibility\n if we decide to change the default (and would need to warn if this\n parameter is not passed).\n \"\"\"\n return request.param\n\n\[email protected](params=[True, False, None])\ndef ordered_fixture(request):\n \"\"\"\n Boolean 'ordered' parameter for Categorical.\n \"\"\"\n return request.param\n\n\n_all_arithmetic_operators = [\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__pow__\",\n \"__rpow__\",\n \"__mod__\",\n \"__rmod__\",\n]\n\n\[email protected](params=_all_arithmetic_operators)\ndef all_arithmetic_operators(request):\n \"\"\"\n Fixture for dunder names for common arithmetic operations.\n \"\"\"\n return request.param\n\n\[email protected](\n params=[\n operator.add,\n ops.radd,\n operator.sub,\n ops.rsub,\n operator.mul,\n ops.rmul,\n operator.truediv,\n ops.rtruediv,\n operator.floordiv,\n ops.rfloordiv,\n operator.mod,\n ops.rmod,\n operator.pow,\n ops.rpow,\n ]\n)\ndef all_arithmetic_functions(request):\n \"\"\"\n Fixture for operator and roperator arithmetic functions.\n\n Notes\n -----\n This includes divmod and rdivmod, whereas all_arithmetic_operators\n does not.\n \"\"\"\n return request.param\n\n\n_all_numeric_reductions = [\n \"sum\",\n \"max\",\n \"min\",\n \"mean\",\n \"prod\",\n \"std\",\n \"var\",\n \"median\",\n \"kurt\",\n \"skew\",\n]\n\n\[email protected](params=_all_numeric_reductions)\ndef all_numeric_reductions(request):\n \"\"\"\n Fixture for numeric reduction names.\n \"\"\"\n return request.param\n\n\n_all_boolean_reductions = [\"all\", \"any\"]\n\n\[email protected](params=_all_boolean_reductions)\ndef all_boolean_reductions(request):\n \"\"\"\n Fixture for boolean reduction names.\n \"\"\"\n return request.param\n\n\n_cython_table = pd.core.base.SelectionMixin._cython_table.items()\n\n\[email protected](params=list(_cython_table))\ndef cython_table_items(request):\n \"\"\"\n Yields a tuple of a function and its corresponding name. 
Correspond to\n the list of aggregator \"Cython functions\" used on selected table items.\n \"\"\"\n return request.param\n\n\ndef _get_cython_table_params(ndframe, func_names_and_expected):\n \"\"\"\n Combine frame, functions from SelectionMixin._cython_table\n keys and expected result.\n\n Parameters\n ----------\n ndframe : DataFrame or Series\n func_names_and_expected : Sequence of two items\n The first item is a name of a NDFrame method ('sum', 'prod') etc.\n The second item is the expected return value.\n\n Returns\n -------\n list\n List of three items (DataFrame, function, expected result)\n \"\"\"\n results = []\n for func_name, expected in func_names_and_expected:\n results.append((ndframe, func_name, expected))\n results += [\n (ndframe, func, expected)\n for func, name in _cython_table\n if name == func_name\n ]\n return results\n\n\[email protected](params=[\"__eq__\", \"__ne__\", \"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef all_compare_operators(request):\n \"\"\"\n Fixture for dunder names for common compare operations\n\n * >=\n * >\n * ==\n * !=\n * <\n * <=\n \"\"\"\n return request.param\n\n\[email protected](params=[\"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef compare_operators_no_eq_ne(request):\n \"\"\"\n Fixture for dunder names for compare operations except == and !=\n\n * >=\n * >\n * <\n * <=\n \"\"\"\n return request.param\n\n\[email protected](\n params=[\"__and__\", \"__rand__\", \"__or__\", \"__ror__\", \"__xor__\", \"__rxor__\"]\n)\ndef all_logical_operators(request):\n \"\"\"\n Fixture for dunder names for common logical operations\n\n * |\n * &\n * ^\n \"\"\"\n return request.param\n\n\[email protected](params=[None, \"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests.\n \"\"\"\n return request.param\n\n\[email protected](params=[\"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression_only(request):\n \"\"\"\n Fixture for trying common compression types in compression tests excluding\n uncompressed case.\n \"\"\"\n return request.param\n\n\[email protected](params=[True, False])\ndef writable(request):\n \"\"\"\n Fixture that an array is writable.\n \"\"\"\n return request.param\n\n\[email protected](scope=\"module\")\ndef datetime_tz_utc():\n \"\"\"\n Yields the UTC timezone object from the datetime module.\n \"\"\"\n return timezone.utc\n\n\[email protected](params=[\"utc\", \"dateutil/UTC\", utc, tzutc(), timezone.utc])\ndef utc_fixture(request):\n \"\"\"\n Fixture to provide variants of UTC timezone strings and tzinfo objects.\n \"\"\"\n return request.param\n\n\[email protected](params=[\"inner\", \"outer\", \"left\", \"right\"])\ndef join_type(request):\n \"\"\"\n Fixture for trying all types of join operations.\n \"\"\"\n return request.param\n\n\[email protected]\ndef strict_data_files(pytestconfig):\n \"\"\"\n Returns the configuration for the test setting `--strict-data-files`.\n \"\"\"\n return pytestconfig.getoption(\"--strict-data-files\")\n\n\[email protected]\ndef datapath(strict_data_files):\n \"\"\"\n Get the path to a data file.\n\n Parameters\n ----------\n path : str\n Path to the file, relative to ``pandas/tests/``\n\n Returns\n -------\n path including ``pandas/tests``.\n\n Raises\n ------\n ValueError\n If the path doesn't exist and the --strict-data-files option is set.\n \"\"\"\n BASE_PATH = os.path.join(os.path.dirname(__file__), \"tests\")\n\n def deco(*args):\n path = os.path.join(BASE_PATH, *args)\n if not 
os.path.exists(path):\n if strict_data_files:\n raise ValueError(\n f\"Could not find file {path} and --strict-data-files is set.\"\n )\n else:\n pytest.skip(f\"Could not find {path}.\")\n return path\n\n return deco\n\n\[email protected]\ndef iris(datapath):\n \"\"\"\n The iris dataset as a DataFrame.\n \"\"\"\n return pd.read_csv(datapath(\"data\", \"iris.csv\"))\n\n\[email protected](params=[\"nlargest\", \"nsmallest\"])\ndef nselect_method(request):\n \"\"\"\n Fixture for trying all nselect methods.\n \"\"\"\n return request.param\n\n\[email protected](params=[\"left\", \"right\", \"both\", \"neither\"])\ndef closed(request):\n \"\"\"\n Fixture for trying all interval closed parameters.\n \"\"\"\n return request.param\n\n\[email protected](params=[\"left\", \"right\", \"both\", \"neither\"])\ndef other_closed(request):\n \"\"\"\n Secondary closed fixture to allow parametrizing over all pairs of closed.\n \"\"\"\n return request.param\n\n\[email protected](params=[None, np.nan, pd.NaT, float(\"nan\"), np.float(\"NaN\"), pd.NA])\ndef nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas.\n \"\"\"\n return request.param\n\n\nnulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture\n\n\[email protected](params=[None, np.nan, pd.NaT])\ndef unique_nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas, each null type exactly once.\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of unique_nulls_fixture:\nunique_nulls_fixture2 = unique_nulls_fixture\n\n\nTIMEZONES = [\n None,\n \"UTC\",\n \"US/Eastern\",\n \"Asia/Tokyo\",\n \"dateutil/US/Pacific\",\n \"dateutil/Asia/Singapore\",\n tzutc(),\n tzlocal(),\n FixedOffset(300),\n FixedOffset(0),\n FixedOffset(-300),\n timezone.utc,\n timezone(timedelta(hours=1)),\n timezone(timedelta(hours=-1), name=\"foo\"),\n]\nTIMEZONE_IDS = [repr(i) for i in TIMEZONES]\n\n\[email protected]_fixture_doc(str(TIMEZONE_IDS))\[email protected](params=TIMEZONES, ids=TIMEZONE_IDS)\ndef tz_naive_fixture(request):\n \"\"\"\n Fixture for trying timezones including default (None): {0}\n \"\"\"\n return request.param\n\n\[email protected]_fixture_doc(str(TIMEZONE_IDS[1:]))\[email protected](params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])\ndef tz_aware_fixture(request):\n \"\"\"\n Fixture for trying explicit timezones: {0}\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of tz_aware_fixture:\ntz_aware_fixture2 = tz_aware_fixture\n\n\n# ----------------------------------------------------------------\n# Dtypes\n# ----------------------------------------------------------------\n\nUNSIGNED_INT_DTYPES = [\"uint8\", \"uint16\", \"uint32\", \"uint64\"]\nUNSIGNED_EA_INT_DTYPES = [\"UInt8\", \"UInt16\", \"UInt32\", \"UInt64\"]\nSIGNED_INT_DTYPES = [int, \"int8\", \"int16\", \"int32\", \"int64\"]\nSIGNED_EA_INT_DTYPES = [\"Int8\", \"Int16\", \"Int32\", \"Int64\"]\nALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES\nALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES\n\nFLOAT_DTYPES = [float, \"float32\", \"float64\"]\nCOMPLEX_DTYPES = [complex, \"complex64\", \"complex128\"]\nSTRING_DTYPES = [str, \"str\", \"U\"]\n\nDATETIME64_DTYPES = [\"datetime64[ns]\", \"M8[ns]\"]\nTIMEDELTA64_DTYPES = [\"timedelta64[ns]\", \"m8[ns]\"]\n\nBOOL_DTYPES = [bool, \"bool\"]\nBYTES_DTYPES = [bytes, \"bytes\"]\nOBJECT_DTYPES = [object, \"object\"]\n\nALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES\nALL_NUMPY_DTYPES = (\n ALL_REAL_DTYPES\n + COMPLEX_DTYPES\n + STRING_DTYPES\n + DATETIME64_DTYPES\n + 
TIMEDELTA64_DTYPES\n + BOOL_DTYPES\n + OBJECT_DTYPES\n + BYTES_DTYPES\n)\n\n\[email protected](params=STRING_DTYPES)\ndef string_dtype(request):\n \"\"\"\n Parametrized fixture for string dtypes.\n\n * str\n * 'str'\n * 'U'\n \"\"\"\n return request.param\n\n\[email protected](params=BYTES_DTYPES)\ndef bytes_dtype(request):\n \"\"\"\n Parametrized fixture for bytes dtypes.\n\n * bytes\n * 'bytes'\n \"\"\"\n return request.param\n\n\[email protected](params=OBJECT_DTYPES)\ndef object_dtype(request):\n \"\"\"\n Parametrized fixture for object dtypes.\n\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\[email protected](params=DATETIME64_DTYPES)\ndef datetime64_dtype(request):\n \"\"\"\n Parametrized fixture for datetime64 dtypes.\n\n * 'datetime64[ns]'\n * 'M8[ns]'\n \"\"\"\n return request.param\n\n\[email protected](params=TIMEDELTA64_DTYPES)\ndef timedelta64_dtype(request):\n \"\"\"\n Parametrized fixture for timedelta64 dtypes.\n\n * 'timedelta64[ns]'\n * 'm8[ns]'\n \"\"\"\n return request.param\n\n\[email protected](params=FLOAT_DTYPES)\ndef float_dtype(request):\n \"\"\"\n Parameterized fixture for float dtypes.\n\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\[email protected](params=COMPLEX_DTYPES)\ndef complex_dtype(request):\n \"\"\"\n Parameterized fixture for complex dtypes.\n\n * complex\n * 'complex64'\n * 'complex128'\n \"\"\"\n return request.param\n\n\[email protected](params=SIGNED_INT_DTYPES)\ndef sint_dtype(request):\n \"\"\"\n Parameterized fixture for signed integer dtypes.\n\n * int\n * 'int8'\n * 'int16'\n * 'int32'\n * 'int64'\n \"\"\"\n return request.param\n\n\[email protected](params=UNSIGNED_INT_DTYPES)\ndef uint_dtype(request):\n \"\"\"\n Parameterized fixture for unsigned integer dtypes.\n\n * 'uint8'\n * 'uint16'\n * 'uint32'\n * 'uint64'\n \"\"\"\n return request.param\n\n\[email protected](params=ALL_INT_DTYPES)\ndef any_int_dtype(request):\n \"\"\"\n Parameterized fixture for any integer dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n \"\"\"\n return request.param\n\n\[email protected](params=ALL_EA_INT_DTYPES)\ndef any_nullable_int_dtype(request):\n \"\"\"\n Parameterized fixture for any nullable integer dtype.\n\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n \"\"\"\n return request.param\n\n\[email protected](params=ALL_REAL_DTYPES)\ndef any_real_dtype(request):\n \"\"\"\n Parameterized fixture for any (purely) real numeric dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\[email protected](params=ALL_NUMPY_DTYPES)\ndef any_numpy_dtype(request):\n \"\"\"\n Parameterized fixture for all numpy dtypes.\n\n * bool\n * 'bool'\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n * complex\n * 'complex64'\n * 'complex128'\n * str\n * 'str'\n * 'U'\n * bytes\n * 'bytes'\n * 'datetime64[ns]'\n * 'M8[ns]'\n * 'timedelta64[ns]'\n * 'm8[ns]'\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\n# categoricals are handled separately\n_any_skipna_inferred_dtype = [\n (\"string\", [\"a\", np.nan, \"c\"]),\n (\"string\", [\"a\", pd.NA, \"c\"]),\n (\"bytes\", [b\"a\", np.nan, b\"c\"]),\n (\"empty\", [np.nan, np.nan, np.nan]),\n (\"empty\", []),\n (\"mixed-integer\", [\"a\", np.nan, 
2]),\n (\"mixed\", [\"a\", np.nan, 2.0]),\n (\"floating\", [1.0, np.nan, 2.0]),\n (\"integer\", [1, np.nan, 2]),\n (\"mixed-integer-float\", [1, np.nan, 2.0]),\n (\"decimal\", [Decimal(1), np.nan, Decimal(2)]),\n (\"boolean\", [True, np.nan, False]),\n (\"boolean\", [True, pd.NA, False]),\n (\"datetime64\", [np.datetime64(\"2013-01-01\"), np.nan, np.datetime64(\"2018-01-01\")]),\n (\"datetime\", [pd.Timestamp(\"20130101\"), np.nan, pd.Timestamp(\"20180101\")]),\n (\"date\", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),\n # The following two dtypes are commented out due to GH 23554\n # ('complex', [1 + 1j, np.nan, 2 + 2j]),\n # ('timedelta64', [np.timedelta64(1, 'D'),\n # np.nan, np.timedelta64(2, 'D')]),\n (\"timedelta\", [timedelta(1), np.nan, timedelta(2)]),\n (\"time\", [time(1), np.nan, time(2)]),\n (\"period\", [pd.Period(2013), pd.NaT, pd.Period(2018)]),\n (\"interval\", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),\n]\nids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id\n\n\[email protected](params=_any_skipna_inferred_dtype, ids=ids)\ndef any_skipna_inferred_dtype(request):\n \"\"\"\n Fixture for all inferred dtypes from _libs.lib.infer_dtype\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n * 'mixed-integer-float'\n * 'floating'\n * 'integer'\n * 'decimal'\n * 'boolean'\n * 'datetime64'\n * 'datetime'\n * 'date'\n * 'timedelta'\n * 'time'\n * 'period'\n * 'interval'\n\n Returns\n -------\n inferred_dtype : str\n The string for the inferred dtype from _libs.lib.infer_dtype\n values : np.ndarray\n An array of object dtype that will be inferred to have\n `inferred_dtype`\n\n Examples\n --------\n >>> import pandas._libs.lib as lib\n >>>\n >>> def test_something(any_skipna_inferred_dtype):\n ... inferred_dtype, values = any_skipna_inferred_dtype\n ... # will pass\n ... 
assert lib.infer_dtype(values, skipna=True) == inferred_dtype\n \"\"\"\n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values\n\n\[email protected](\n params=[\n getattr(pd.offsets, o)\n for o in pd.offsets.__all__\n if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)\n ]\n)\ndef tick_classes(request):\n \"\"\"\n Fixture for Tick based datetime offsets available for a time series.\n \"\"\"\n return request.param\n\n\n# ----------------------------------------------------------------\n# Global setup for tests using Hypothesis\n\n\n# Registering these strategies makes them globally available via st.from_type,\n# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py\nfor name in \"MonthBegin MonthEnd BMonthBegin BMonthEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())\n )\n\nfor name in \"YearBegin YearEnd BYearBegin BYearEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-5, 5),\n normalize=st.booleans(),\n month=st.integers(min_value=1, max_value=12),\n ),\n )\n\nfor name in \"QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-24, 24),\n normalize=st.booleans(),\n startingMonth=st.integers(min_value=1, max_value=12),\n ),\n )\n\n\[email protected]\ndef datetime_series():\n \"\"\"\n Fixture for Series of floats with DatetimeIndex\n \"\"\"\n s = tm.makeTimeSeries()\n s.name = \"ts\"\n return s\n\n\[email protected]\ndef float_frame():\n \"\"\"\n Fixture for DataFrame of floats with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D'].\n\n A B C D\n P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465\n qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901\n tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433\n wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651\n M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938\n QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053\n r78Jwns6dn -0.653707 0.883127 0.682199 0.206159\n ... ... ... ... 
...\n IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316\n lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999\n qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121\n yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962\n 65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987\n eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871\n xSucinXxuV -1.263557 0.252799 -0.552247 0.400426\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getSeriesData())\n\n\[email protected](params=[pd.Index, pd.Series], ids=[\"index\", \"series\"])\ndef index_or_series(request):\n \"\"\"\n Fixture to parametrize over Index and Series, made necessary by a mypy\n bug, giving an error:\n\n List item 0 has incompatible type \"Type[Series]\"; expected \"Type[PandasObject]\"\n\n See GH#29725\n \"\"\"\n return request.param\n\n\[email protected]\ndef dict_subclass():\n \"\"\"\n Fixture for a dictionary subclass.\n \"\"\"\n\n class TestSubDict(dict):\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n return TestSubDict\n\n\[email protected]\ndef non_mapping_dict_subclass():\n \"\"\"\n Fixture for a non-mapping dictionary subclass.\n \"\"\"\n\n class TestNonDictMapping(abc.Mapping):\n def __init__(self, underlying_dict):\n self._data = underlying_dict\n\n def __getitem__(self, key):\n return self._data.__getitem__(key)\n\n def __iter__(self):\n return self._data.__iter__()\n\n def __len__(self):\n return self._data.__len__()\n\n return TestNonDictMapping\n\n\ndef _gen_mi():\n # a MultiIndex used to test the general functionality of this object\n\n # See Also: tests.multi.conftest.idx\n major_axis = Index([\"foo\", \"bar\", \"baz\", \"qux\"])\n minor_axis = Index([\"one\", \"two\"])\n\n major_codes = np.array([0, 0, 1, 2, 3, 3])\n minor_codes = np.array([0, 1, 0, 1, 0, 1])\n index_names = [\"first\", \"second\"]\n mi = MultiIndex(\n levels=[major_axis, minor_axis],\n codes=[major_codes, minor_codes],\n names=index_names,\n verify_integrity=False,\n )\n return mi\n\n\nindices_dict = {\n \"unicode\": tm.makeUnicodeIndex(100),\n \"string\": tm.makeStringIndex(100),\n \"datetime\": tm.makeDateIndex(100),\n \"datetime-tz\": tm.makeDateIndex(100, tz=\"US/Pacific\"),\n \"period\": tm.makePeriodIndex(100),\n \"timedelta\": tm.makeTimedeltaIndex(100),\n \"int\": tm.makeIntIndex(100),\n \"uint\": tm.makeUIntIndex(100),\n \"range\": tm.makeRangeIndex(100),\n \"float\": tm.makeFloatIndex(100),\n \"bool\": tm.makeBoolIndex(2),\n \"categorical\": tm.makeCategoricalIndex(100),\n \"interval\": tm.makeIntervalIndex(100),\n \"empty\": Index([]),\n \"tuples\": MultiIndex.from_tuples(zip([\"foo\", \"bar\", \"baz\"], [1, 2, 3])),\n \"multi\": _gen_mi(),\n \"repeats\": Index([0, 0, 1, 1, 2, 2]),\n}\n\n\[email protected](params=indices_dict.keys())\ndef indices(request):\n # copy to avoid mutation, e.g. 
setting .name\n return indices_dict[request.param].copy()\n\n\ndef _create_series(index):\n \"\"\" Helper for the _series dict \"\"\"\n size = len(index)\n data = np.random.randn(size)\n return pd.Series(data, index=index, name=\"a\")\n\n\n_series = {\n f\"series-with-{index_id}-index\": _create_series(index)\n for index_id, index in indices_dict.items()\n}\n\n\n_narrow_dtypes = [\n np.float16,\n np.float32,\n np.int8,\n np.int16,\n np.int32,\n np.uint8,\n np.uint16,\n np.uint32,\n]\n_narrow_series = {\n f\"{dtype.__name__}-series\": tm.makeFloatSeries(name=\"a\").astype(dtype)\n for dtype in _narrow_dtypes\n}\n\n_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}\n\n\[email protected](params=_index_or_series_objs.keys())\ndef index_or_series_obj(request):\n \"\"\"\n Fixture for tests on indexes, series and series with a narrow dtype\n copy to avoid mutation, e.g. setting .name\n \"\"\"\n return _index_or_series_objs[request.param].copy(deep=True)\n"
] |
[
[
"pandas.Series",
"pandas._testing.makeBoolIndex",
"pandas._testing.makeRangeIndex",
"numpy.random.randn",
"pandas._testing.makeDateIndex",
"pandas._testing.makePeriodIndex",
"pandas._testing.makeIntervalIndex",
"pandas._testing.makeTimeSeries",
"pandas._testing.makeFloatIndex",
"pandas.set_option",
"pandas._testing.getSeriesData",
"pandas._testing.makeTimedeltaIndex",
"pandas.core.indexes.api.Index",
"pandas._testing.makeFloatSeries",
"pandas.core.base.SelectionMixin._cython_table.items",
"pandas._testing.makeIntIndex",
"pandas.Interval",
"numpy.array",
"pandas._testing.makeStringIndex",
"pandas._testing.makeUIntIndex",
"pandas._testing.makeCategoricalIndex",
"numpy.datetime64",
"pandas.core.indexes.api.MultiIndex",
"pandas.Period",
"pandas._testing.makeUnicodeIndex",
"pandas.Timestamp",
"numpy.float"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liragabriel/DS
|
[
"d75402d5c11dc9c6832260e49b591128fbc1b9ca"
] |
[
"netstats/lista_dataframe.py"
] |
[
"import pandas as pd\nfrom netstats.fsan import Fsan\n\n\nclass ListaDataframe:\n\n def __init__(self, operacao):\n self.operacao = operacao\n\n\n def dataframe(self):\n\n \"\"\"\n Retorna uma lista de dataframes por FSAN, cada dataframe contém as operações realizadas\n\n com a FSAN.\n\n Returns\n -------\n list\n \"\"\"\n\n fsan = Fsan(self.operacao).lista_de_fsans()\n\n sequencia = []\n for i in fsan:\n lista = []\n for j in self.operacao.operacao:\n if i in j or i+':' in j:\n lista.append(j)\n sequencia.append(lista)\n\n lista_data = []\n for i in sequencia:\n lista_data.append(pd.DataFrame(i))\n pd.set_option('display.max_colwidth', -1)\n\n for i in range(len(lista_data)):\n lista_data[i].columns = [fsan[i]]\n\n return lista_data\n"
] |
[
[
"pandas.set_option",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
haddocking/disvis
|
[
"a922bd079b41ad5ef3ac33f4e68968f8978626d2"
] |
[
"disvis/IO/mmcif.py"
] |
[
"from __future__ import print_function\nimport sys\nfrom collections import OrderedDict\nimport numpy as np\n\ndef parse_cif(infile):\n if isinstance(infile, file):\n pass\n elif isinstance(infile, str):\n infile = open(infile)\n else:\n raise TypeError(\"Input should either be a file or string.\")\n\n atom_site = OrderedDict()\n with infile as f:\n for line in f:\n \n if line.startswith('_atom_site.'):\n words = line.split('.')\n atom_site[words[1].strip()] = [] \n\n if line.startswith('ATOM'):\n words = line.split()\n for key, word in zip(atom_site, words):\n atom_site[key].append(word)\n\n natoms = len(atom_site['id'])\n dtype = [('atom_id', np.int64), ('name', np.str_, 4), \n ('resn', np.str_, 4), ('chain', np.str_, 2), \n ('resi', np.int64), ('x', np.float64),\n ('y', np.float64), ('z', np.float64), \n ('occupancy', np.float64), ('bfactor', np.float64),\n ('element', np.str_, 2), ('charge', np.str_, 2),\n ('model', np.int64),\n ]\n\n cifdata = np.zeros(natoms, dtype=dtype)\n cifdata['atom_id'] = np.asarray(atom_site['id'], dtype=np.int64)\n cifdata['name'] = atom_site['label_atom_id']\n cifdata['resn'] = atom_site['label_comp_id']\n cifdata['chain'] = atom_site['label_asym_id']\n cifdata['resi'] = atom_site['label_seq_id']\n cifdata['x'] = atom_site['Cartn_x']\n cifdata['y'] = atom_site['Cartn_y']\n cifdata['z'] = atom_site['Cartn_z']\n cifdata['occupancy'] = atom_site['occupancy']\n cifdata['bfactor'] = atom_site['B_iso_or_equiv']\n cifdata['element'] = atom_site['type_symbol'].title()\n cifdata['charge'] = atom_site['pdbx_formal_charge']\n cifdata['model'] = atom_site['pdbx_PDB_model_num']\n\n return cifdata\n\nif __name__=='__main__': \n import sys\n infile = sys.argv[1]\n data = parse_cif(infile)\n\n"
] |
[
[
"numpy.asarray",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kgrozdanic/lumen-data-science-2022
|
[
"115e14d8502210c662a68913365dc9c1179c3998",
"115e14d8502210c662a68913365dc9c1179c3998"
] |
[
"src/data/outlier_detection.py",
"src/models/coord_prediction_utils.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport cv2\nfrom imutils import paths\nimport argparse\nimport pickle\nimport vptree\nimport matplotlib.pyplot as plt\nimport time\nfrom tqdm import tqdm\nimport os\nfrom skimage.io import imread, imshow\nimport seaborn as sns\nfrom src.helpers import *\n\nMAIN_PATH = \"../../data/full_75x75/train\"\nPATH_224 = \"../../data/full_170x170/train\"\n\nPATH = \"../../data/full_170x170/\"\n\nrgb = [\"R\", \"G\", \"B\"]\nsuffix = [\"mean\", \"relative\", \"std\"]\nfeature_cols = [r + '_' + s for s in suffix for r in rgb] + [\"mean\", \"std\"]\n\nposition = [\"\", \"_NE\", \"_NW\", \"_SW\", \"_SE\", \"_center\"]\ncols = [col + pos for pos in position for col in feature_cols]\n\n\ndef show_imgs(uuids):\n fig, ax = plt.subplots(len(uuids), 4, figsize=(16, 4 * len(uuids)))\n\n for i, uuid in enumerate(uuids):\n img_path = PATH_224 + \"/\" + uuid\n for j, angle in enumerate([0, 90, 180, 270]):\n path = f\"{img_path}/{angle}.jpg\"\n img = imread(path)\n if len(uuids) == 1:\n ax[j].imshow(img, cmap=plt.cm.gray)\n else:\n ax[i][j].imshow(img, cmap=plt.cm.gray)\n plt.tight_layout()\n\n\ndef generate_features() -> pd.DataFrame:\n df = pd.DataFrame(columns=[\"uuid\", \"angle\", *cols])\n imagePaths = list(paths.list_images(MAIN_PATH))\n\n # Loading and hashing the images\n for img_path in tqdm(imagePaths):\n original_image = imread(img_path)\n features = []\n\n img_path = os.path.normpath(img_path).split(\"\\\\\")\n uuid = img_path[-2]\n angle = img_path[-1].split('.')[0]\n\n for pos in position:\n if pos == \"_NW\":\n image = original_image[:40, :40]\n elif pos == \"_NE\":\n image = original_image[:40, 40:]\n elif pos == \"_SE\":\n image = original_image[40:, 40:]\n elif pos == \"_SW\":\n image = original_image[40:, :40]\n elif pos == \"_center\":\n image = original_image[20:60, 20:60]\n else:\n image = original_image\n\n f_mean = image.sum((0, 1)) / (image.shape[0] * image.shape[1])\n f_relative = image.sum((0, 1)) / (image.sum((0, 1)).sum() - image.sum((0, 1)))\n f_std = image.std((0, 1))\n\n M = image.mean()\n S = image.std()\n\n features += [*f_mean, *f_relative, *f_std, M, S]\n\n df.loc[len(df.index)] = [uuid, angle, *features]\n return df\n\n\ndef detect_possible_outliers(df: pd.DataFrame) -> pd.DataFrame:\n print('Detection possible outliers')\n N_L = 90\n N_s = 90\n\n figures_path = '../../reports/outliers/'\n possible_outliers = set()\n\n for pos in tqdm(position):\n for col in feature_cols:\n indices = list(set(df[col].nlargest(n=N_L).index).difference(possible_outliers))\n uuids = df.loc[indices, \"uuid\"].tolist()\n possible_outliers.update(uuids)\n\n indices = list(set(df[col].nsmallest(n=N_s).index).difference(possible_outliers))\n uuids = df.loc[indices, \"uuid\"].tolist()\n possible_outliers.update(uuids)\n\n N_L += 7\n N_s += 7\n\n possible_outliers = list(possible_outliers)\n old = list(pd.read_csv('../../reports/outlier_detection/possible_outliers.csv')['uuid'].values)\n possible_outliers = list(set(possible_outliers).difference(old))\n\n old = list(pd.read_csv('../../reports/outlier_detection/possible_outliers_full.csv')['uuid'].values)\n possible_outliers = list(set(possible_outliers).difference(old))\n\n print(f\"Found {len(possible_outliers)} possible outliers\")\n return pd.DataFrame({\"uuid\": possible_outliers})\n\n\n\n\n\ndef read_manual_outliers():\n # df_possible_outliers = set(pd.read_csv('../../reports/outlier_detection/possible_outliers.csv')[\"uuid\"].values.tolist())\n\n not_outliers = 
list(paths.list_images('../../reports/outlier_detection/sigurno_outlieri'))\n not_outliers = list(set([os.path.normpath(outlier).split(\"\\\\\")[-1].split('.')[-2] for outlier in not_outliers]))\n\n # outliers = list(df_possible_outliers.difference(not_outliers))\n\n df_true_outliers = pd.DataFrame({'uuid': not_outliers})\n df_true_outliers.to_csv('../../reports/outlier_detection/true_outliers.csv', index=False)\n\n\n print(f'Done: {len(df_true_outliers)} true outliers')\n\n\ndef main():\n command = input(\"Generate features (Y) or detect true outliers (n)?\")\n\n if command == \"Y\":\n # df = generate_features()\n # df.to_csv(\"../../reports/outlier_detection/outlier_features_full.csv\", index=False)\n\n # df = pd.read_csv(\"../../reports/outlier_detection/outlier_features_full.csv\")\n #\n # df_possible_outliers = detect_possible_outliers(df)\n # df_possible_outliers.to_csv('../../reports/outlier_detection/possible_outliers_full2.csv', index=False)\n\n figures_path = '../../reports/outlier_detection/template_matching_images/'\n generate_images_from_csv('../../reports/outlier_detection/template_matching_outlier_detection.csv', figures_path, PATH)\n print(\"Run this script again with 'n' argument.\")\n\n elif command == \"n\":\n read_manual_outliers()\n\n else:\n print(\"krivo xd\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"import numpy as np\nfrom math import radians, sin, cos, asin, sqrt, acos\nimport ast\nfrom tqdm import tqdm\nimport pandas as pd\n\n\ndef agg_dicts(dicts):\n agg = {}\n for key in dicts[0]:\n values = []\n for dic in dicts:\n values.append(dic[key])\n agg[key] = np.mean(values)\n return agg\n\n\ndef dicmax(dic):\n return max(dic, key=dic.get)\n\n\ndef haversine(c1, c2):\n lon1, lat1 = c1\n lon2, lat2 = c2\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n return 2 * 6371 * asin(sqrt(a))\n\n\ndef prepare_test(name):\n test = pd.read_csv(f\"../../data/embeddings/mobilenetv3_trained_for_state_embeddings_{name}.csv\")\n points_test = test.drop(columns=[\"uuid\", \"latitude\", \"longitude\", \"coordinates\", \"state\", \"region\", \"biogeoregion\", \"very_fine\", \"best_21_predictions\", \"best_21_score\"])\n test[\"predicted_state\"] = test[\"best_21_predictions\"].apply(lambda x: ast.literal_eval(x)[0])\n test[\"best_21_predictions\"] = test[\"best_21_predictions\"].apply(lambda x: ast.literal_eval(x))\n test[\"best_21_score\"] = test[\"best_21_score\"].apply(lambda x: ast.literal_eval(x))\n test[\"probs_map\"] = test.apply(lambda row: {row[\"best_21_predictions\"][i]: row[\"best_21_score\"][i] for i in range(21)}, axis=1)\n test[\"mgprob\"] = test[\"uuid\"].apply(lambda x: dicmax(agg_dicts(test[test.uuid == x].probs_map.values)))\n\n return test, points_test\n\n\ndef test_coordinate_prediction(cp, name):\n test, points_test = prepare_test(name)\n coordinates = []\n embeddings_by_uuid = {uuid: [] for uuid in test.uuid.unique()}\n for i in tqdm(range(len(test))):\n uuid = test.iloc[i].uuid\n embedding = points_test.iloc[i]\n state = test.iloc[i][\"mgprob\"]\n embeddings_by_uuid[uuid].append((embedding, state))\n\n coordinates_by_uuid = {uuid: cp.predict(embeddings=[e[0] for e in embeddings_by_uuid[uuid]], states=[e[1] for e in embeddings_by_uuid[uuid]]) for uuid in test.uuid.unique()}\n for i in tqdm(range(len(test))):\n coordinates.append(coordinates_by_uuid[test.iloc[i].uuid])\n\n errors = [haversine(coordinates[i], ast.literal_eval(test.iloc[i].coordinates)) for i in range(len(test))]\n print(name, np.mean(errors), np.median(errors))\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"pandas.DataFrame"
],
[
"numpy.median",
"pandas.read_csv",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Keirua/blog.keiruaprod.fr
|
[
"76e6623ff3d625690e1dad02efa5e12073be5381"
] |
[
"charts/cats.py"
] |
[
"import cutecharts.charts as ctc\nimport pandas as pd\nimport numpy as np\n\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\ndf=pd.read_csv('catsshouldnt.csv', sep=',')\n\n# https://github.com/cutecharts/cutecharts.py#-usage\nchart = ctc.Bar('Follower count for @catsshouldnt',width='500px',height='400px')\nchart.set_options(\n labels=list(df[\"date\"]),\n x_label=\"Date\",\n y_label=\"Follower count\" ,\n colors=['#FFF1C9','#F7B7A3','#EA5F89','#9B3192','#57167E','#47B39C','#00529B']\n)\n\nchart.add_series(\"Follower count\",list(df[\"Follower count\"]))\nchart.render()"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
holli/probability
|
[
"7a0ce5e5beff91051028258dfbc7bc6cf0c4998d",
"3e84aa840b624f4184819f1e6ce9180c7997aad9",
"7a0ce5e5beff91051028258dfbc7bc6cf0c4998d",
"7a0ce5e5beff91051028258dfbc7bc6cf0c4998d"
] |
[
"tensorflow_probability/python/bijectors/sinh_arcsinh_test.py",
"tensorflow_probability/python/bijectors/invert.py",
"discussion/nn/variational_base.py",
"tensorflow_probability/python/bijectors/scale.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for SinhArcsinh Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass SinhArcsinhTest(test_util.TestCase):\n \"\"\"Tests correctness of the power transformation.\"\"\"\n\n def testBijectorVersusNumpyRewriteOfBasicFunctions(self):\n skewness = 0.2\n tailweight = 2.0\n multiplier = 2.0 / np.sinh(np.arcsinh(2.0) * tailweight)\n bijector = tfb.SinhArcsinh(\n skewness=skewness, tailweight=tailweight, validate_args=True)\n self.assertStartsWith(bijector.name, \"sinh_arcsinh\")\n x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight) * multiplier\n self.assertAllClose(y, self.evaluate(bijector.forward(x)))\n self.assertAllClose(x, self.evaluate(bijector.inverse(y)))\n self.assertAllClose(\n np.sum(\n np.log(np.cosh(\n np.arcsinh(y / multiplier) / tailweight - skewness)) -\n np.log(tailweight) - np.log(np.sqrt((y / multiplier)**2 + 1))\n - np.log(multiplier),\n axis=-1),\n self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)),\n rtol=2e-6)\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=1)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)),\n rtol=1e-4,\n atol=0.)\n\n def testSkew(self):\n # Will broadcast together to shape [3, 2].\n x = [-1., 1.]\n skewness = [[-1.], [0.], [1.]]\n bijector = tfb.SinhArcsinh(skewness=skewness, validate_args=True)\n y = self.evaluate(bijector.forward(x))\n\n # For skew < 0, |forward(-1)| > |forward(1)|\n self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))\n\n # For skew = 0, |forward(-1)| = |forward(1)|\n self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))\n\n # For skew > 0, |forward(-1)| < |forward(1)|\n self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))\n\n def testKurtosis(self):\n x = np.logspace(-2, 2, 1000).astype(np.float32)\n tailweight = [[0.5], [1.0], [2.0]]\n bijector = tfb.SinhArcsinh(tailweight=tailweight, validate_args=True)\n y = self.evaluate(bijector.forward(x))\n mean = np.mean(x, axis=-1)\n stddev = np.std(x, axis=-1, ddof=0)\n kurtosis = np.mean((y - mean) ** 4, axis=-1) / (stddev ** 4)\n self.assertAllClose(kurtosis, np.sort(kurtosis))\n\n def testScalarCongruencySkewness1Tailweight0p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=1.0, tailweight=0.5, validate_args=True)\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=-2., upper_x=2.0, eval_func=self.evaluate, rtol=0.05)\n\n def 
testScalarCongruencySkewnessNeg1Tailweight1p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=-1.0, tailweight=1.5, validate_args=True)\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=-2., upper_x=2.0, eval_func=self.evaluate, rtol=0.05)\n\n def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):\n bijector = tfb.SinhArcsinh(\n skewness=-1., tailweight=0.5, validate_args=True)\n x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(\n -2, 10, 1000))).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, x, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testBijectiveAndFiniteSkewness1Tailweight3(self):\n bijector = tfb.SinhArcsinh(skewness=1., tailweight=3., validate_args=True)\n x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(\n -2, 5, 1000))).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, x, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testBijectorEndpoints(self):\n for dtype in (np.float32, np.float64):\n bijector = tfb.SinhArcsinh(\n skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)\n bounds = np.array(\n [np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)\n # Note that the above bijector is the identity bijector. Hence, the\n # log_det_jacobian will be 0. Because of this we use atol.\n bijector_test_util.assert_bijective_and_finite(\n bijector, bounds, bounds, eval_func=self.evaluate, event_ndims=0,\n atol=2e-6)\n\n def testBijectorOverRange(self):\n for dtype in (np.float32, np.float64):\n skewness = np.array([1.2, 5.], dtype=dtype)\n tailweight = np.array([2., 10.], dtype=dtype)\n # The inverse will be defined up to where sinh is valid, which is\n # arcsinh(np.finfo(dtype).max).\n log_boundary = np.log(\n np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))\n x = np.array([\n np.logspace(-2, log_boundary[0], base=np.e, num=1000),\n np.logspace(-2, log_boundary[1], base=np.e, num=1000)\n ], dtype=dtype)\n # Ensure broadcasting works.\n x = np.swapaxes(x, 0, 1)\n multiplier = 2. / np.sinh(np.arcsinh(2.) * tailweight)\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight) * multiplier\n bijector = tfb.SinhArcsinh(\n skewness=skewness, tailweight=tailweight, validate_args=True)\n\n self.assertAllClose(\n y, self.evaluate(bijector.forward(x)), rtol=1e-4, atol=0.)\n self.assertAllClose(\n x, self.evaluate(bijector.inverse(y)), rtol=1e-4, atol=0.)\n\n # On IBM PPC systems, longdouble (np.float128) is same as double except\n # that it can have more precision. Type double being of 8 bytes, can't\n # hold square of max of float64 (which is also 8 bytes).\n # Below test fails due to overflow error giving inf. 
This check avoids\n # that error by skipping square calculation and corresponding assert.\n\n if (np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and\n np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min))):\n\n # Do the numpy calculation in float128 to avoid inf/nan.\n y_float128 = np.float128(y)\n self.assertAllClose(\n np.log(np.cosh(\n np.arcsinh(y_float128 / multiplier)\n / tailweight - skewness) / np.sqrt(\n (y_float128 / multiplier)**2 + 1))\n - np.log(tailweight) - np.log(multiplier),\n self.evaluate(\n bijector.inverse_log_det_jacobian(y, event_ndims=0)),\n rtol=1e-4,\n atol=0.)\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=0)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)),\n rtol=1e-4,\n atol=0.)\n\n def testZeroTailweightRaises(self):\n with self.assertRaisesOpError(\"Argument `tailweight` must be positive\"):\n self.evaluate(\n tfb.SinhArcsinh(tailweight=0., validate_args=True).forward(1.0))\n\n def testDefaultDtypeIsFloat32(self):\n bijector = tfb.SinhArcsinh()\n self.assertEqual(bijector.tailweight.dtype, np.float32)\n self.assertEqual(bijector.skewness.dtype, np.float32)\n\n def testVariableTailweight(self):\n x = tf.Variable(1.)\n b = tfb.SinhArcsinh(tailweight=x, validate_args=True)\n self.evaluate(x.initializer)\n self.assertIs(x, b.tailweight)\n self.assertEqual((), self.evaluate(b.forward(0.5)).shape)\n with self.assertRaisesOpError(\"Argument `tailweight` must be positive.\"):\n with tf.control_dependencies([x.assign(-1.)]):\n self.assertEqual((), self.evaluate(b.forward(0.5)).shape)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Invert bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\n\n__all__ = [\n \"Invert\",\n]\n\n\nclass Invert(bijector_lib.Bijector):\n \"\"\"Bijector which inverts another Bijector.\n\n Example Use: [ExpGammaDistribution (see Background & Context)](\n https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)\n models `Y=log(X)` where `X ~ Gamma`.\n\n ```python\n exp_gamma_distribution = TransformedDistribution(\n distribution=Gamma(concentration=1., rate=2.),\n bijector=bijector.Invert(bijector.Exp())\n ```\n\n \"\"\"\n\n def __init__(self, bijector, validate_args=False, name=None):\n \"\"\"Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.\n\n Note: An inverted bijector's `inverse_log_det_jacobian` is often more\n efficient if the base bijector implements `_forward_log_det_jacobian`. If\n `_forward_log_det_jacobian` is not implemented then the following code is\n used:\n\n ```python\n y = self.inverse(x, **kwargs)\n return -self.inverse_log_det_jacobian(y, **kwargs)\n ```\n\n Args:\n bijector: Bijector instance.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str`, name given to ops managed by this object.\n \"\"\"\n\n if not bijector._is_injective: # pylint: disable=protected-access\n raise NotImplementedError(\n \"Invert is not implemented for non-injective bijectors.\")\n\n name = name or \"_\".join([\"invert\", bijector.name])\n with tf.name_scope(name) as name:\n self._bijector = bijector\n super(Invert, self).__init__(\n forward_min_event_ndims=bijector.inverse_min_event_ndims,\n inverse_min_event_ndims=bijector.forward_min_event_ndims,\n is_constant_jacobian=bijector.is_constant_jacobian,\n validate_args=validate_args,\n dtype=bijector.dtype,\n name=name)\n\n def forward_event_shape(self, input_shape):\n return self.bijector.inverse_event_shape(input_shape)\n\n def forward_event_shape_tensor(self, input_shape):\n return self.bijector.inverse_event_shape_tensor(input_shape)\n\n def inverse_event_shape(self, output_shape):\n return self.bijector.forward_event_shape(output_shape)\n\n def inverse_event_shape_tensor(self, output_shape):\n return self.bijector.forward_event_shape_tensor(output_shape)\n\n @property\n def bijector(self):\n return self._bijector\n\n def _internal_is_increasing(self, **kwargs):\n return self.bijector._internal_is_increasing(**kwargs) # pylint: disable=protected-access\n\n def forward(self, x, **kwargs):\n return self.bijector.inverse(x, **kwargs)\n\n def inverse(self, y, **kwargs):\n return self.bijector.forward(y, **kwargs)\n\n def inverse_log_det_jacobian(self, y, event_ndims, 
**kwargs):\n return self.bijector.forward_log_det_jacobian(y, event_ndims, **kwargs)\n\n def forward_log_det_jacobian(self, x, event_ndims, **kwargs):\n return self.bijector.inverse_log_det_jacobian(x, event_ndims, **kwargs)\n",
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Base class for variational layers for building neural networks.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\n\nfrom discussion.nn import layers as layers_lib\nfrom tensorflow_probability.python.distributions import distribution as distribution_lib\nfrom tensorflow_probability.python.distributions import independent as independent_lib\nfrom tensorflow_probability.python.distributions import kullback_leibler as kl_lib\nfrom tensorflow_probability.python.distributions import mvn_diag as mvn_diag_lib\nfrom tensorflow_probability.python.distributions import normal as normal_lib\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal.reparameterization import FULLY_REPARAMETERIZED\nfrom tensorflow_probability.python.math.random_ops import random_rademacher\nfrom tensorflow_probability.python.monte_carlo import expectation\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n\n__all__ = [\n 'VariationalLayer',\n]\n\n\n# The following aliases ensure docstrings read more succinctly.\ntfd = distribution_lib\n\n\ndef kl_divergence_monte_carlo(q, r, w):\n \"\"\"Monte Carlo KL Divergence.\"\"\"\n return expectation(\n lambda w: q.log_prob(w) - r.log_prob(w),\n samples=w,\n log_prob=q.log_prob,\n use_reparameterization=all(\n rt == FULLY_REPARAMETERIZED\n for rt in tf.nest.flatten(q.reparameterization_type)),\n axis=())\n\n\ndef kl_divergence_exact(q, r, w):\n \"\"\"Exact KL Divergence.\"\"\"\n del w\n return kl_lib.kl_divergence(q, r)\n\n\ndef unpack_kernel_and_bias(weights):\n \"\"\"Returns `kernel`, `bias` tuple.\"\"\"\n if isinstance(weights, collections.Mapping):\n kernel = weights.get('kernel', None)\n bias = weights.get('bias', None)\n elif len(weights) == 1:\n kernel, bias = weights, None\n elif len(weights) == 2:\n kernel, bias = weights\n else:\n raise ValueError('Unable to unpack weights: {}.'.format(weights))\n return kernel, bias\n\n\nclass VariationalLayer(layers_lib.Layer):\n \"\"\"Base class for all variational layers.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n seed=None,\n dtype=tf.float32,\n name=None):\n \"\"\"Base class for variational layers.\n\n # mean ==> penalty_weight = 1 / train_size\n # sum ==> penalty_weight = batch_size / train_size\n\n Args:\n posterior: ...\n prior: ...\n penalty_weight: ...\n posterior_penalty_fn: ...\n posterior_value_fn: ...\n seed: ...\n dtype: ...\n name: Python `str` prepeneded to ops created by this object.\n Default value: `None` (i.e., `type(self).__name__`).\n \"\"\"\n super(VariationalLayer, self).__init__(name=name)\n 
self._posterior = posterior\n self._prior = prior\n self._penalty_weight = penalty_weight\n self._posterior_penalty_fn = posterior_penalty_fn\n self._posterior_value_fn = posterior_value_fn\n self._seed = SeedStream(seed, salt=self.name)\n self._dtype = dtype\n tf.nest.assert_same_structure(prior.dtype, posterior.dtype,\n check_types=False)\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def posterior(self):\n return self._posterior\n\n @property\n def prior(self):\n return self._prior\n\n @property\n def penalty_weight(self):\n return self._penalty_weight\n\n @property\n def posterior_penalty_fn(self):\n return self._posterior_penalty_fn\n\n @property\n def posterior_value_fn(self):\n return self._posterior_value_fn\n\n # @tf.function(autograph=False, experimental_compile=True)\n def eval(self, inputs, is_training=True, **kwargs):\n inputs = tf.convert_to_tensor(inputs, dtype=self.dtype, name='inputs')\n w = self.posterior_value_fn(self.posterior, seed=self._seed()) # pylint: disable=not-callable\n if is_training:\n penalty = self.posterior_penalty_fn(self.posterior, self.prior, w) # pylint: disable=not-callable\n if penalty is not None and self.penalty_weight is not None:\n penalty *= tf.cast(self.penalty_weight, dtype=penalty.dtype)\n else:\n penalty = None\n outputs = self._eval(inputs, w, **kwargs)\n self._set_extra_loss(penalty)\n self._set_extra_result(w)\n return outputs, self.extra_loss, self.extra_result\n\n def _eval(self, inputs, weights):\n raise NotImplementedError('Subclass failed to implement `_eval`.')\n\n\nclass VariationalReparameterizationKernelBiasLayer(VariationalLayer):\n \"\"\"Variational reparameterization linear layer.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n apply_kernel_fn,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n unpack_weights_fn=unpack_kernel_and_bias,\n seed=None,\n dtype=tf.float32,\n name=None):\n super(VariationalReparameterizationKernelBiasLayer, self).__init__(\n posterior,\n prior,\n penalty_weight=penalty_weight,\n posterior_penalty_fn=posterior_penalty_fn,\n posterior_value_fn=posterior_value_fn,\n seed=seed,\n dtype=dtype,\n name=name)\n self._apply_kernel_fn = apply_kernel_fn\n self._unpack_weights_fn = unpack_weights_fn\n\n @property\n def unpack_weights_fn(self):\n return self._unpack_weights_fn\n\n def _eval(self, x, weights):\n kernel, bias = self.unpack_weights_fn(weights) # pylint: disable=not-callable\n y = x\n if kernel is not None:\n y = self._apply_kernel_fn(y, kernel)\n if bias is not None:\n y = tf.nn.bias_add(y, bias)\n return y\n\n\nclass VariationalFlipoutKernelBiasLayer(VariationalLayer):\n \"\"\"Variational flipout linear layer.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n apply_kernel_fn,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n unpack_weights_fn=unpack_kernel_and_bias,\n seed=None,\n dtype=tf.float32,\n name=None):\n super(VariationalFlipoutKernelBiasLayer, self).__init__(\n posterior,\n prior,\n penalty_weight=penalty_weight,\n posterior_penalty_fn=posterior_penalty_fn,\n posterior_value_fn=posterior_value_fn,\n seed=seed,\n dtype=dtype,\n name=name)\n self._apply_kernel_fn = apply_kernel_fn\n self._unpack_weights_fn = unpack_weights_fn\n\n @property\n def unpack_weights_fn(self):\n return self._unpack_weights_fn\n\n def _eval(self, x, weights):\n kernel, bias = self.unpack_weights_fn(weights) # pylint: 
disable=not-callable\n y = x\n\n if kernel is not None:\n kernel_dist, _ = self.unpack_weights_fn( # pylint: disable=not-callable\n self.posterior.sample_distributions(value=weights)[0])\n kernel_loc, kernel_scale = get_spherical_normal_loc_scale(kernel_dist)\n\n # batch_size = tf.shape(x)[0]\n # sign_input_shape = ([batch_size] +\n # [1] * self._rank +\n # [self._input_channels])\n y *= random_rademacher(prefer_static.shape(y),\n dtype=y.dtype,\n seed=self._seed())\n kernel_perturb = normal_lib.Normal(loc=0., scale=kernel_scale)\n y = self._apply_kernel_fn( # E.g., tf.matmul.\n y,\n kernel_perturb.sample(seed=self._seed()))\n y *= random_rademacher(prefer_static.shape(y),\n dtype=y.dtype,\n seed=self._seed())\n y += self._apply_kernel_fn(x, kernel_loc)\n\n if bias is not None:\n y = tf.nn.bias_add(y, bias)\n\n return y\n\n\ndef get_spherical_normal_loc_scale(d):\n if isinstance(d, independent_lib.Independent):\n return get_spherical_normal_loc_scale(d.distribution)\n if isinstance(d, (normal_lib.Normal, mvn_diag_lib.MultivariateNormalDiag)):\n return d.loc, d.scale\n raise TypeError('Expected kernel `posterior` to be spherical Normal; '\n 'saw: \"{}\".'.format(type(d).__name__))\n",
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Scale bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensor_util\n\n\n__all__ = [\n 'Scale',\n]\n\n\nclass Scale(bijector.Bijector):\n \"\"\"Compute `Y = g(X; scale) = scale * X`.\n\n Examples:\n\n ```python\n # Y = 2 * X\n b = Scale(scale=2.)\n ```\n\n \"\"\"\n\n def __init__(self,\n scale,\n validate_args=False,\n name='scale'):\n \"\"\"Instantiates the `Scale` bijector.\n\n This `Bijector`'s forward operation is:\n\n ```none\n Y = g(X) = scale * X\n ```\n\n Args:\n scale: Floating-point `Tensor`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n \"\"\"\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([scale], dtype_hint=tf.float32)\n self._scale = tensor_util.convert_nonref_to_tensor(\n scale, dtype=dtype, name='scale')\n\n super(Scale, self).__init__(\n forward_min_event_ndims=0,\n is_constant_jacobian=True,\n validate_args=validate_args,\n dtype=dtype,\n name=name)\n\n @property\n def scale(self):\n \"\"\"The `scale` term in `Y = scale * X`.\"\"\"\n return self._scale\n\n def _is_increasing(self):\n return self.scale > 0\n\n def _forward(self, x):\n return x * self.scale\n\n def _inverse(self, y):\n return y / self.scale\n\n def _forward_log_det_jacobian(self, x):\n return tf.math.log(tf.abs(self.scale))\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if (self.scale is not None and\n is_init != tensor_util.is_ref(self.scale)):\n assertions.append(\n assert_util.assert_none_equal(\n self.scale,\n tf.zeros([], dtype=self._scale.dtype),\n message='Argument `scale` must be non-zero.'))\n return assertions\n"
] |
[
[
"tensorflow.compat.v2.Variable",
"numpy.swapaxes",
"numpy.log",
"tensorflow.compat.v2.test.main",
"numpy.abs",
"numpy.amax",
"numpy.logspace",
"numpy.amin",
"numpy.sqrt",
"numpy.sort",
"numpy.float128",
"numpy.finfo",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.arcsinh"
],
[
"tensorflow.compat.v2.name_scope"
],
[
"tensorflow.compat.v2.nn.bias_add",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.nest.assert_same_structure"
],
[
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.name_scope"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bluetyson/discretize
|
[
"a4ead91d6a1f84658ab20946da5fa86dc9ccc831",
"a4ead91d6a1f84658ab20946da5fa86dc9ccc831"
] |
[
"tutorials/inner_products/2_physical_properties.py",
"discretize/mixins/omfModule.py"
] |
[
"\"\"\"\nConstitutive Relations\n======================\n\nWhen solving PDEs using the finite volume approach, inner products may\ncontain constitutive relations; examples include Ohm's law and Hooke's law.\nFor this class of inner products, you will learn how to:\n\n - Construct the inner-product matrix in the case of isotropic and anisotropic constitutive relations\n - Construct the inverse of the inner-product matrix\n - Work with constitutive relations defined by the reciprocal of a parameter\n\nLet :math:`\\\\vec{J}` and :math:`\\\\vec{E}` be two physically related\nquantities. If their relationship is isotropic (defined by a constant\n:math:`\\\\sigma`), then the constitutive relation is given by:\n\n.. math::\n \\\\vec{J} = \\\\sigma \\\\vec{E}\n\nThe inner product between a vector :math:`\\\\vec{v}` and the right-hand side\nof this expression is given by:\n\n.. math::\n (\\\\vec{v}, \\\\sigma \\\\vec{E} ) = \\\\int_\\\\Omega \\\\vec{v} \\\\cdot \\\\sigma \\\\vec{E} \\\\, dv\n\nJust like in the previous tutorial, we would like to approximate the inner\nproduct numerically using an *inner-product matrix* such that:\n\n.. math::\n (\\\\vec{v}, \\\\sigma \\\\vec{E} ) \\\\approx \\\\mathbf{v^T M_\\\\sigma e}\n\nwhere the inner product matrix :math:`\\\\mathbf{M_\\\\sigma}` now depends on:\n\n 1. the dimensions and discretization of the mesh\n 2. where :math:`\\\\mathbf{v}` and :math:`\\\\mathbf{e}` live\n 3. the spatial distribution of the property :math:`\\\\sigma`\n\nIn the case of anisotropy, the constitutive relations are defined by a tensor\n(:math:`\\\\Sigma`). Here, the constitutive relation is of the form:\n\n.. math::\n \\\\vec{J} = \\\\Sigma \\\\vec{E}\n\nwhere\n\n.. math::\n \\\\Sigma = \\\\begin{bmatrix} \\\\sigma_{1} & \\\\sigma_{4} & \\\\sigma_{5} \\n\n \\\\sigma_{4} & \\\\sigma_{2} & \\\\sigma_{6} \\n\n \\\\sigma_{5} & \\\\sigma_{6} & \\\\sigma_{3} \\\\end{bmatrix}\n\nIs symmetric and defined by 6 independent parameters. The inner product between\na vector :math:`\\\\vec{v}` and the right-hand side of this expression is given\nby:\n\n.. math::\n (\\\\vec{v}, \\\\Sigma \\\\vec{E} ) = \\\\int_\\\\Omega \\\\vec{v} \\\\cdot \\\\Sigma \\\\vec{E} \\\\, dv\n\nOnce again we would like to approximate the inner product numerically using an\n*inner-product matrix* :math:`\\\\mathbf{M_\\\\Sigma}` such that:\n\n.. math::\n (\\\\vec{v}, \\\\Sigma \\\\vec{E} ) \\\\approx \\\\mathbf{v^T M_\\\\Sigma e}\n \n\n\n\"\"\"\n\n####################################################\n#\n# Import Packages\n# ---------------\n#\n# Here we import the packages required for this tutorial\n#\n\nfrom discretize import TensorMesh\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# sphinx_gallery_thumbnail_number = 1\n\n#####################################################\n# Inner Product for a Single Cell\n# -------------------------------\n#\n# Here we compare the inner product matricies for a single cell when the\n# constitutive relationship is:\n# \n# - **isotropic:** :math:`\\sigma_1 = \\sigma_2 = \\sigma_3 = \\sigma` and :math:`\\sigma_4 = \\sigma_5 = \\sigma_6 = 0`; e.g. 
:math:`\\vec{J} = \\sigma \\vec{E}`\n# - **diagonal anisotropic:** independent parameters :math:`\\sigma_1, \\sigma_2, \\sigma_3` and :math:`\\sigma_4 = \\sigma_5 = \\sigma_6 = 0`\n# - **fully anisotropic:** independent parameters :math:`\\sigma_1, \\sigma_2, \\sigma_3, \\sigma_4, \\sigma_5, \\sigma_6`\n# \n# When approximating the inner product according to the finite volume approach,\n# the constitutive parameters are defined at cell centers; even if the\n# fields/fluxes live at cell edges/faces. As we will see, inner-product\n# matricies are generally diagonal; except for in the fully anisotropic case\n# where the inner product matrix contains a significant number of non-diagonal\n# entries.\n# \n\n# Create a single 3D cell\nh = np.ones(1)\nmesh = TensorMesh([h, h, h])\n\n# Define 6 constitutive parameters for the cell\nsig1, sig2, sig3, sig4, sig5, sig6 = 6, 5, 4, 3, 2, 1\n\n# Isotropic case\nsig = sig1*np.ones((1, 1))\nsig_tensor_1 = np.diag(sig1*np.ones(3))\nMe1 = mesh.getEdgeInnerProduct(sig) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(sig) # Faces inner product matrix\n\n# Diagonal anisotropic\nsig = np.c_[sig1, sig2, sig3]\nsig_tensor_2 = np.diag(np.array([sig1, sig2, sig3]))\nMe2 = mesh.getEdgeInnerProduct(sig)\nMf2 = mesh.getFaceInnerProduct(sig)\n\n# Full anisotropic\nsig = np.c_[sig1, sig2, sig3, sig4, sig5, sig6]\nsig_tensor_3 = np.diag(np.array([sig1, sig2, sig3]))\nsig_tensor_3[(0, 1), (1, 0)] = sig4\nsig_tensor_3[(0, 2), (2, 0)] = sig5\nsig_tensor_3[(1, 2), (2, 1)] = sig6\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n# Plotting matrix entries\nfig = plt.figure(figsize=(12, 12))\n\nax1 = fig.add_subplot(331)\nax1.imshow(sig_tensor_1)\nax1.set_title('Property Tensor (isotropic)')\n\nax2 = fig.add_subplot(332)\nax2.imshow(sig_tensor_2)\nax2.set_title('Property Tensor (diagonal anisotropic)')\n\nax3 = fig.add_subplot(333)\nax3.imshow(sig_tensor_3)\nax3.set_title('Property Tensor (full anisotropic)')\n\nax4 = fig.add_subplot(334)\nax4.imshow(Mf1.todense())\nax4.set_title('M-faces Matrix (isotropic)')\n\nax5 = fig.add_subplot(335)\nax5.imshow(Mf2.todense())\nax5.set_title('M-faces Matrix (diagonal anisotropic)')\n\nax6 = fig.add_subplot(336)\nax6.imshow(Mf3.todense())\nax6.set_title('M-faces Matrix (full anisotropic)')\n\nax7 = fig.add_subplot(337)\nax7.imshow(Me1.todense())\nax7.set_title('M-edges Matrix (isotropic)')\n\nax8 = fig.add_subplot(338)\nax8.imshow(Me2.todense())\nax8.set_title('M-edges Matrix (diagonal anisotropic)')\n\nax9 = fig.add_subplot(339)\nax9.imshow(Me3.todense())\nax9.set_title('M-edges Matrix (full anisotropic)')\n\n\n#############################################################\n# Spatially Variant Parameters\n# ----------------------------\n#\n# In practice, the parameter :math:`\\sigma` or tensor :math:`\\Sigma` will\n# vary spatially. In this case, we define the parameter\n# :math:`\\sigma` (or parameters :math:`\\Sigma`) for each cell. When\n# creating the inner product matrix, we enter these parameters as\n# a numpy array. This is demonstrated below. 
Properties of the resulting\n# inner product matricies are discussed.\n#\n\n# Create a small 3D mesh\nh = np.ones(5)\nmesh = TensorMesh([h, h, h])\n\n# Isotropic case: (nC, ) numpy array\nsig = np.random.rand(mesh.nC) # sig for each cell\nMe1 = mesh.getEdgeInnerProduct(sig) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(sig) # Faces inner product matrix\n\n# Linear case: (nC, dim) numpy array\nsig = np.random.rand(mesh.nC, mesh.dim)\nMe2 = mesh.getEdgeInnerProduct(sig)\nMf2 = mesh.getFaceInnerProduct(sig)\n\n# Anisotropic case: (nC, 3) for 2D and (nC, 6) for 3D\nsig = np.random.rand(mesh.nC, 6)\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n# Properties of inner product matricies\nprint('\\n FACE INNER PRODUCT MATRIX')\nprint('- Number of faces :', mesh.nF)\nprint('- Dimensions of operator :', str(mesh.nF), 'x', str(mesh.nF))\nprint('- Number non-zero (isotropic) :', str(Mf1.nnz))\nprint('- Number non-zero (linear) :', str(Mf2.nnz))\nprint('- Number non-zero (anisotropic):', str(Mf3.nnz), '\\n')\n\nprint('\\n EDGE INNER PRODUCT MATRIX')\nprint('- Number of faces :', mesh.nE)\nprint('- Dimensions of operator :', str(mesh.nE), 'x', str(mesh.nE))\nprint('- Number non-zero (isotropic) :', str(Me1.nnz))\nprint('- Number non-zero (linear) :', str(Me2.nnz))\nprint('- Number non-zero (anisotropic):', str(Me3.nnz), '\\n')\n\n\n#############################################################\n# Inverse\n# -------\n#\n# The final discretized system using the finite volume method may contain\n# the inverse of the inner-product matrix. Here we show how to call this\n# using the *invMat* keyword argument.\n#\n# For the isotropic and diagonally anisotropic cases, the inner product matrix\n# is diagonal. As a result, its inverse can be easily formed. For the full\n# anisotropic case however, we cannot expicitly form the inverse because the\n# inner product matrix contains a significant number of off-diagonal elements.\n#\n# For the isotropic and diagonal anisotropic cases we can form\n# :math:`\\mathbf{M}^{-1}` then apply it to a vector using the :math:`*`\n# operator. For the full anisotropic case, we must form the inner product\n# matrix and do a numerical solve.\n#\n\n# Create a small 3D mesh\nh = np.ones(5)\nmesh = TensorMesh([h, h, h])\n\n# Isotropic case: (nC, ) numpy array\nsig = np.random.rand(mesh.nC)\nMe1_inv = mesh.getEdgeInnerProduct(sig, invMat=True)\nMf1_inv = mesh.getFaceInnerProduct(sig, invMat=True)\n\n# Diagonal anisotropic: (nC, dim) numpy array\nsig = np.random.rand(mesh.nC, mesh.dim)\nMe2_inv = mesh.getEdgeInnerProduct(sig, invMat=True)\nMf2_inv = mesh.getFaceInnerProduct(sig, invMat=True)\n\n# Full anisotropic: (nC, 3) for 2D and (nC, 6) for 3D\nsig = np.random.rand(mesh.nC, 6)\nMe3 = mesh.getEdgeInnerProduct(sig)\nMf3 = mesh.getFaceInnerProduct(sig)\n\n\n###########################################################################\n# Reciprocal Properties\n# ---------------------\n#\n# At times, the constitutive relation may be defined by the reciprocal of\n# a parameter (:math:`\\rho`). Here we demonstrate how inner product matricies\n# can be formed using the keyword argument *invProp*. We will do this for a\n# single cell and plot the matrix elements. We can easily extend this to\n# a mesh comprised of many cells.\n#\n# In this case, the constitutive relation is given by:\n#\n# .. 
math::\n# \\vec{J} = \\frac{1}{\\rho} \\vec{E}\n#\n# The inner product between a vector :math:`\\\\vec{v}` and the right-hand side\n# of the expression is given by:\n#\n# .. math::\n# (\\vec{v}, \\rho^{-1} \\vec{E} ) = \\int_\\Omega \\vec{v} \\cdot \\rho^{-1} \\vec{E} \\, dv\n#\n# where the inner product is approximated using an inner product matrix\n# :math:`\\mathbf{M_{\\rho^{-1}}}` as follows:\n#\n# .. math::\n# (\\vec{v}, \\rho^{-1} \\vec{E} ) \\approx \\mathbf{v^T M_{\\rho^{-1}} e}\n#\n# In the case that the constitutive relation is defined by a\n# tensor :math:`P`, e.g.:\n#\n# .. math::\n# \\vec{J} = P \\vec{E}\n#\n# where\n#\n# .. math::\n# P = \\begin{bmatrix} \\rho_{1}^{-1} & \\rho_{4}^{-1} & \\rho_{5}^{-1} \\\\\n# \\rho_{4}^{-1} & \\rho_{2}^{-1} & \\rho_{6}^{-1} \\\\\n# \\rho_{5}^{-1} & \\rho_{6}^{-1} & \\rho_{3}^{-1} \\end{bmatrix}\n#\n# The inner product between a vector :math:`\\vec{v}` and the right-hand side of\n# this expression is given by:\n#\n# .. math::\n# (\\vec{v}, P \\vec{E} ) = \\int_\\Omega \\vec{v} \\cdot P \\vec{E} \\, dv\n#\n# Once again we would like to approximate the inner product numerically using an\n# *inner-product matrix* :math:`\\mathbf{M_P}` such that:\n#\n# .. math::\n# (\\vec{v}, P \\vec{E} ) \\approx \\mathbf{v^T M_P e}\n#\n# Here we demonstrate how to form the inner-product matricies\n# :math:`\\mathbf{M_{\\rho^{-1}}}` and :math:`\\mathbf{M_P}`.\n#\n\n# Create a small 3D mesh\nh = np.ones(1)\nmesh = TensorMesh([h, h, h])\n\n# Define 6 constitutive parameters for the cell\nrho1, rho2, rho3, rho4, rho5, rho6 = 1./6., 1./5., 1./4., 1./3., 1./2., 1\n\n# Isotropic case\nrho = rho1*np.ones((1, 1))\nMe1 = mesh.getEdgeInnerProduct(rho, invProp=True) # Edges inner product matrix\nMf1 = mesh.getFaceInnerProduct(rho, invProp=True) # Faces inner product matrix\n\n# Diagonal anisotropic case\nrho = np.c_[rho1, rho2, rho3]\nMe2 = mesh.getEdgeInnerProduct(rho, invProp=True)\nMf2 = mesh.getFaceInnerProduct(rho, invProp=True)\n\n# Full anisotropic case\nrho = np.c_[rho1, rho2, rho3, rho4, rho5, rho6]\nMe3 = mesh.getEdgeInnerProduct(rho, invProp=True)\nMf3 = mesh.getFaceInnerProduct(rho, invProp=True)\n\n# Plotting matrix entries\nfig = plt.figure(figsize=(14, 9))\n\nax1 = fig.add_subplot(231)\nax1.imshow(Mf1.todense())\nax1.set_title('Isotropic (Faces)')\n\nax2 = fig.add_subplot(232)\nax2.imshow(Mf2.todense())\nax2.set_title('Diagonal Anisotropic (Faces)')\n\nax3 = fig.add_subplot(233)\nax3.imshow(Mf3.todense())\nax3.set_title('Full Anisotropic (Faces)')\n\nax4 = fig.add_subplot(234)\nax4.imshow(Me1.todense())\nax4.set_title('Isotropic (Edges)')\n\nax5 = fig.add_subplot(235)\nax5.imshow(Me2.todense())\nax5.set_title('Diagonal Anisotropic (Edges)')\n\nax6 = fig.add_subplot(236)\nax6.imshow(Me3.todense())\nax6.set_title('Full Anisotropic (Edges)')\n",
"\"\"\"\nA class for converting ``discretize`` meshes to OMF objects\n\"\"\"\n\nimport omf\nimport numpy as np\n\n\nimport discretize\n\n\ndef ravel_data_array(arr, nx, ny, nz):\n \"\"\"Ravel's a numpy array into proper order for passing to the OMF\n specification from ``discretize``/UBC formats\n \"\"\"\n dim = (nz, ny, nx)\n return np.reshape(arr, dim, order='C').ravel(order='F')\n\n\ndef unravel_data_array(arr, nx, ny, nz):\n \"\"\"Unravel's a numpy array from the OMF specification to\n ``discretize``/UBC formats - the is the inverse of ``ravel_data_array``\n \"\"\"\n dim = (nz, ny, nx)\n return np.reshape(arr, dim, order='F').ravel(order='C')\n\n\nclass InterfaceOMF(object):\n\n\n def _tensor_mesh_to_omf(mesh, models=None):\n \"\"\"\n Constructs an :class:`omf.VolumeElement` object of this tensor mesh and\n the given models as cell data of that grid.\n\n Parameters\n ----------\n\n mesh : discretize.TensorMesh\n The tensor mesh to convert to a :class:`omf.VolumeElement`\n\n models : dict(numpy.ndarray)\n Name('s) and array('s). Match number of cells\n\n \"\"\"\n if models is None:\n models = {}\n # Make the geometry\n geometry = omf.VolumeGridGeometry()\n # Set tensors\n tensors = mesh.h\n if len(tensors) < 1:\n raise RuntimeError(\"Your mesh is empty... fill it out before converting to OMF\")\n elif len(tensors) == 1:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = np.array([0.0,])\n geometry.tensor_w = np.array([0.0,])\n elif len(tensors) == 2:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = tensors[1]\n geometry.tensor_w = np.array([0.0,])\n elif len(tensors) == 3:\n geometry.tensor_u = tensors[0]\n geometry.tensor_v = tensors[1]\n geometry.tensor_w = tensors[2]\n else:\n raise RuntimeError(\"This mesh is too high-dimensional for OMF\")\n # Set rotation axes\n geometry.axis_u = mesh.axis_u\n geometry.axis_v = mesh.axis_v\n geometry.axis_w = mesh.axis_w\n # Set the origin\n geometry.origin = mesh.x0\n # Make sure the geometry is built correctly\n geometry.validate()\n # Make the volume elemet (the OMF object)\n omfmesh = omf.VolumeElement(\n geometry=geometry,\n )\n # Add model data arrays onto the cells of the mesh\n omfmesh.data = []\n for name, arr in models.items():\n data = omf.ScalarData(name=name,\n array=ravel_data_array(arr, mesh.nCx, mesh.nCy, mesh.nCz),\n location='cells')\n omfmesh.data.append(data)\n # Validate to make sure a proper OMF object is returned to the user\n omfmesh.validate()\n return omfmesh\n\n\n def _tree_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not possible until OMF v2 is released.')\n\n\n def _curvilinear_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not currently possible.')\n\n\n def _cyl_mesh_to_omf(mesh, models=None):\n raise NotImplementedError('Not currently possible.')\n\n\n def to_omf(mesh, models=None):\n \"\"\"Convert this mesh object to it's proper ``omf`` data object with\n the given model dictionary as the cell data of that dataset.\n\n Parameters\n ----------\n\n models : dict(numpy.ndarray)\n Name('s) and array('s). 
Match number of cells\n\n \"\"\"\n # TODO: mesh.validate()\n converters = {\n # TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,\n 'tensor' : InterfaceOMF._tensor_mesh_to_omf,\n # TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,\n # TODO: 'CylMesh' : InterfaceOMF._cyl_mesh_to_omf,\n }\n key = mesh._meshType.lower()\n try:\n convert = converters[key]\n except KeyError:\n raise RuntimeError('Mesh type `{}` is not currently supported for OMF conversion.'.format(key))\n # Convert the data object\n return convert(mesh, models=models)\n\n\n @staticmethod\n def _omf_volume_to_tensor(element):\n \"\"\"Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`\n \"\"\"\n geometry = element.geometry\n h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]\n mesh = discretize.TensorMesh(h)\n mesh.axis_u = geometry.axis_u\n mesh.axis_v = geometry.axis_v\n mesh.axis_w = geometry.axis_w\n mesh.x0 = geometry.origin\n\n data_dict = {}\n for data in element.data:\n # NOTE: this is agnostic about data location - i.e. nodes vs cells\n data_dict[data.name] = unravel_data_array(np.array(data.array), mesh.nCx, mesh.nCy, mesh.nCz)\n\n # Return TensorMesh and data dictionary\n return mesh, data_dict\n\n\n @staticmethod\n def from_omf(element):\n \"\"\"Convert an OMF element to it's proper ``discretize`` type.\n Automatically determines the output type. Returns both the mesh and a\n dictionary of model arrays.\n \"\"\"\n element.validate()\n converters = {\n omf.VolumeElement.__name__ : InterfaceOMF._omf_volume_to_tensor,\n }\n key = element.__class__.__name__\n try:\n convert = converters[key]\n except KeyError:\n raise RuntimeError('OMF type `{}` is not currently supported for conversion.'.format(key))\n # Convert the data object\n return convert(element)\n"
] |
[
[
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.random.rand",
"numpy.ones"
],
[
"numpy.reshape",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kalnun/pandas-ta
|
[
"60b6cc42f6c53bfdc18fe77e9d70a00712ce3149"
] |
[
"pandas_ta/volatility/true_range.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom pandas import DataFrame\nfrom ..utils import get_drift, get_offset, non_zero_range, verify_series\n\ndef true_range(high, low, close, drift=None, offset=None, **kwargs):\n \"\"\"Indicator: True Range\"\"\"\n # Validate arguments\n high = verify_series(high)\n low = verify_series(low)\n close = verify_series(close)\n high_low_range = non_zero_range(high, low)\n drift = get_drift(drift)\n offset = get_offset(offset)\n\n # Calculate Result\n prev_close = close.shift(drift)\n ranges = [high_low_range, high - prev_close, prev_close - low]\n true_range = DataFrame(ranges).T\n true_range = true_range.abs().max(axis=1)\n\n # Offset\n if offset != 0:\n true_range = true_range.shift(offset)\n\n # Handle fills\n if \"fillna\" in kwargs:\n true_range.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n true_range.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Name and Categorize it\n true_range.name = f\"TRUERANGE_{drift}\"\n true_range.category = \"volatility\"\n\n return true_range\n\n\n\ntrue_range.__doc__ = \\\n\"\"\"True Range\n\nAn method to expand a classical range (high minus low) to include\npossible gap scenarios.\n\nSources:\n https://www.macroption.com/true-range/\n\nCalculation:\n Default Inputs:\n drift=1\n ABS = Absolute Value\n prev_close = close.shift(drift)\n TRUE_RANGE = ABS([high - low, high - prev_close, low - prev_close]) \n\nArgs:\n high (pd.Series): Series of 'high's\n low (pd.Series): Series of 'low's\n close (pd.Series): Series of 'close's\n drift (int): The shift period. Default: 1\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature\n\"\"\""
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
CRingrose94/geomeTRIC
|
[
"5d8eada8c0fafc4aa354adae4a2d84b5b8d943b2"
] |
[
"geometric/tests/test_batch_opt.py"
] |
[
"\"\"\"\nA set of tests for using the QCEngine project\n\"\"\"\n\nimport copy\nimport numpy as np\nimport tempfile\nimport logging\nimport math\nfrom geometric.molecule import bohr2ang\n\nlogger = logging.getLogger(__name__)\n\nfrom . import addons\nimport geometric.optimize as gt \nfrom geometric.internal import CartesianCoordinates,\\\n PrimitiveInternalCoordinates, DelocalizedInternalCoordinates\nfrom geometric.nifty import ang2bohr\n\nlocalizer = addons.in_folder\ntest_logger = addons.test_logger\n\n_base_schema = {\n \"schema_version\": 1,\n \"molecule\": {\n \"geometry\": [\n 0.0, 0.0, -0.1294769411935893,\n 0.0, -1.494187339479985, 1.0274465079245698,\n 0.0, 1.494187339479985, 1.0274465079245698\n ],\n \"symbols\": [\"O\", \"H\", \"H\"],\n \"connectivity\": [[0, 1, 1], [0, 2, 1]]\n },\n \"driver\": \"gradient\",\n \"model\": {\n \"method\": \"UFF\",\n \"basis\": None\n },\n \"keywords\": {},\n \"program\": \"rdkit\"\n } # yapf: disable\n\n_geo2 = [0.0139, -0.4830, 0.2848,\n 0.0628, -0.2860, 0.7675,\n 0.0953, -1.0031, 0.4339]\n\[email protected]_qcengine\[email protected]_rdkit\n\n\n\n\nclass BatchOptimizer(object):\n \"\"\" Demo BatchOptmizer for runnig pytest test \"\"\"\n \n def __init__(self, **kwargs):\n self.kwargs = kwargs\n self.params = gt.OptParams(**kwargs)\n \n \n def _initOptimizer(self, schemas):\n \"\"\" initilize all OptObjects for the schmas passed.\n \n Arguements\n ----------\n schemas: list of schemas for qcengine\n \n return\n ------\n list of OptOject's for each schema\n \"\"\"\n \n #=========================================#\n #| Set up the internal coordinate system |#\n #=========================================#\n # First item in tuple: The class to be initialized\n # Second item in tuple: Whether to connect non-bonded fragments\n # Third item in tuple: Whether to throw in all Cartesian (no effect if second item is True)\n CoordSysDict = {'cart':(CartesianCoordinates, False, False),\n 'prim':(PrimitiveInternalCoordinates, True, False),\n 'dlc':(DelocalizedInternalCoordinates, True, False),\n 'hdlc':(DelocalizedInternalCoordinates, False, True),\n 'tric':(DelocalizedInternalCoordinates, False, False)}\n coordsys = self.kwargs.get('coordsys', 'tric')\n CoordClass, connect, addcart = CoordSysDict[coordsys.lower()]\n\n optimizers = []\n for schema in schemas:\n M, engine = gt.get_molecule_engine(engine='qcengine', qcschema=schema, **self.kwargs)\n coords = M.xyzs[0].flatten() * ang2bohr\n \n # Read in the constraints\n constraints = self.kwargs.get('constraints', None) #Constraint input file (optional)\n if constraints is not None:\n Cons, CVals = gt.ParseConstraints(M, open(constraints).read())\n else:\n Cons = None\n CVals = None\n \n IC = CoordClass(M, build=True, connect=connect, addcart=addcart, constraints=Cons, \n cvals=CVals[0] if CVals is not None else None)\n tmpDir = tempfile.mkdtemp(\".tmp\", \"batchOpt\")\n\n optimizer = gt.Optimizer(coords, M, IC, engine, tmpDir, self.params)\n optimizer.calcEnergyForce()\n optimizer.prepareFirstStep()\n logger.debug(\"[AU]: e=%.5f bl=%.5f,%.5f g=%.4f\" % (\n optimizer.E, optimizer.X[0],optimizer.X[3], optimizer.gradx[0]))\n optimizers.append(optimizer)\n \n return optimizers\n \n\n def _batchComputeEnergyAndForces(self, optimizers):\n \"\"\" This just an mockup. 
if this was NNP this would work in one batch\n on the GPU.\n \"\"\"\n for optimizer in optimizers:\n if optimizer.state == gt.OPT_STATE.NEEDS_EVALUATION:\n optimizer.calcEnergyForce()\n logger.debug(\"[AU]: e=%.5f bl=%.5f,%.5f g=%.4f\" % (\n optimizer.E, optimizer.X[0],optimizer.X[3], optimizer.gradx[0]))\n \n def optimizeMols(self, schemas):\n \"\"\" Optmize all molecules as represented by the schemas.\n \n return\n ------\n list of optimized Molecule's\n \"\"\"\n optimizers = self._initOptimizer(schemas)\n res = []\n \n # Optimization Loop, while not all have completed optimization\n while len(optimizers) > 0:\n nextOptObjs = []\n \n # take one step, energy and gradient must have been stored in optObj\n for optimizer in optimizers: \n optimizer.step()\n\n self._batchComputeEnergyAndForces(optimizers)\n\n # evaluate step\n for optimizer in optimizers: \n if optimizer.state == gt.OPT_STATE.NEEDS_EVALUATION:\n \n optimizer.evaluateStep() \n if optimizer.state in [gt.OPT_STATE.CONVERGED, gt.OPT_STATE.FAILED]:\n logger.info(\"Optmization convereged!\")\n res.append(optimizer.progress)\n continue\n nextOptObjs.append(optimizer)\n if len(nextOptObjs) == 0: break ######## All Done\n \n # step and evaluation completed, next step for remaining conformations\n optimizers = nextOptObjs\n \n return res\n \n\[email protected]_qcengine\[email protected]_rdkit\ndef test_rdkit_simple(test_logger):\n\n schema1 = copy.deepcopy(_base_schema)\n schema2 = copy.deepcopy(_base_schema)\n schema2['molecule']['geometry']= [c / bohr2ang for c in _geo2]\n \n opts = {\"qcengine\": True, \"input\": \"tmp_data\", \"qce_program\": \"rdkit\"}\n\n bOptimizer = BatchOptimizer(**opts)\n ret = bOptimizer.optimizeMols([schema1, schema2])\n\n # Currently in angstrom\n ref = np.array([0., 0., -0.0644928042, 0., -0.7830365196, 0.5416895554, 0., 0.7830365196, 0.5416895554])\n assert np.allclose(ref, ret[0].xyzs[-1].ravel(), atol=1.e-5)\n \n # check that distances in ref are same as in ret[1]\n refAt = ref.reshape(-1,3)\n retAt = ret[1].xyzs[-1]\n for atRef,atRet in zip(refAt,retAt):\n for atRef2,atRet2 in zip(refAt,retAt):\n d2Ref = np.power(atRef[0]-atRef2[0],2) + np.power(atRef[1]-atRef2[1],2) +np.power(atRef[2]-atRef2[2],2)\n d2Ret = np.power(atRet[0]-atRet2[0],2) + np.power(atRet[1]-atRet2[1],2) +np.power(atRet[2]-atRet2[2],2)\n \n assert math.isclose(d2Ref, d2Ret, abs_tol=1e-3)\n \n \n \n\n_N2_schema = {\n \"schema_version\": 1,\n \"molecule\": {\n \"geometry\": [\n 0.0, 0., 0.,\n 1.9, 0., 0.\n ],\n \"symbols\": [\"N\", \"N\"],\n \"connectivity\": [[0, 1, 3]]\n },\n \"driver\": \"gradient\",\n \"model\": {\n \"method\": \"UFF\",\n \"basis\": None\n },\n \"keywords\": {},\n \"program\": \"rdkit\"\n } # yapf: disable\n\n_N2_geo2 = [0.0, 0., 0.,\n 0.6, 0., 0.,]\n\n\n\[email protected]_qcengine\[email protected]_rdkit\ndef test_rdkit_N2(test_logger):\n\n schema1 = copy.deepcopy(_N2_schema)\n schema2 = copy.deepcopy(_N2_schema)\n schema2['molecule']['geometry']= [c / bohr2ang for c in _N2_geo2]\n \n opts = {\"qcengine\": True, \"input\": \"tmp_data\", \"qce_program\": \"rdkit\"}\n\n bOptimizer = BatchOptimizer(**opts)\n ret = bOptimizer.optimizeMols([schema1, schema2])\n\n # Currently in angstrom\n ref = np.array([-0.05729, 0., 0., 1.06272, 0., 0.])\n assert np.allclose(ref, ret[0].xyzs[-1].ravel(), atol=1.e-3)\n \n # check that distances in ref are same as in ret[1]\n refAt = ref.reshape(-1,3)\n retAt = ret[1].xyzs[-1]\n for atRef,atRet in zip(refAt,retAt):\n for atRef2,atRet2 in zip(refAt,retAt):\n d2Ref = 
np.power(atRef[0]-atRef2[0],2) + np.power(atRef[1]-atRef2[1],2) +np.power(atRef[2]-atRef2[2],2)\n d2Ret = np.power(atRet[0]-atRet2[0],2) + np.power(atRet[1]-atRet2[1],2) +np.power(atRet[2]-atRet2[2],2)\n \n assert math.isclose(d2Ref, d2Ret, abs_tol=1e-3)\n"
] |
[
[
"numpy.array",
"numpy.power"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kapteyn-astro/kapteyn
|
[
"f12332cfd567c7c0da40628dcfc7b297971ee636",
"f12332cfd567c7c0da40628dcfc7b297971ee636",
"f12332cfd567c7c0da40628dcfc7b297971ee636",
"f12332cfd567c7c0da40628dcfc7b297971ee636"
] |
[
"doc/source/EXAMPLES/mu_ticklabeldemo.py",
"kapteyn/interpolation.py",
"doc/source/EXAMPLES/mu_graticule.py",
"kapteyn/celestial.py"
] |
[
"from kapteyn import maputils\nfrom matplotlib import pylab as plt\n\nheader = {'NAXIS': 2 ,'NAXIS1':100 , 'NAXIS2': 100 ,\n'CDELT1': -7.165998823000E-03, 'CRPIX1': 5.100000000000E+01 ,\n'CRVAL1': -5.128208479590E+01, 'CTYPE1': 'RA---NCP', 'CUNIT1': 'DEGREE ',\n'CDELT2': 7.165998823000E-03, 'CRPIX2': 5.100000000000E+01,\n'CRVAL2': 6.015388802060E+01, 'CTYPE2': 'DEC--NCP ', 'CUNIT2': 'DEGREE'\n}\n\nfig = plt.figure()\nframe = fig.add_axes([0.20,0.15,0.75,0.8])\nf = maputils.FITSimage(externalheader=header)\nannim = f.Annotatedimage(frame)\ngrat = annim.Graticule()\ngrat2 = annim.Graticule(skyout='Galactic')\ngrat.setp_ticklabel(plotaxis=\"bottom\", position=\"20h34m\", fmt=\"%g\",\n color='r', rotation=30)\ngrat.setp_ticklabel(plotaxis='left', color='b', rotation=20,\n fontsize=14, fontweight='bold', style='italic')\ngrat.setp_ticklabel(plotaxis='left', color='m', position=\"60d0m0s\", \n fmt=\"DMS\", tex=False) \ngrat.setp_axislabel(plotaxis='left', xpos=-0.25, ypos=0.5)\n# Rotation is inherited from previous setting \ngrat2.setp_gratline(color='g')\ngrat2.setp_ticklabel(visible=False)\ngrat2.setp_axislabel(visible=False)\n\nannim.plot()\nplt.show()\n",
"# Copyright (C) 2003-2005 Peter J. Verveer\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport math\nimport numpy\nfrom . import _ni_support\nfrom kapteyn import _nd_image\n\ndef _extend_mode_to_code(mode):\n mode = _ni_support._extend_mode_to_code(mode)\n return mode\n\ndef spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,\n output_type = None):\n \"\"\"Calculates a one-dimensional spline filter along the given axis.\n\n The lines of the array along the given axis are filtered by a\n spline filter. The order of the spline must be >= 2 and <= 5.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n if order in [0, 1]:\n output[...] = numpy.array(input)\n else:\n axis = _ni_support._check_axis(axis, input.ndim)\n _nd_image.spline_filter1d(input, order, axis, output)\n return return_value\n\n\ndef spline_filter(input, order = 3, output = numpy.float64,\n output_type = None):\n \"\"\"Multi-dimensional spline filter.\n\n Note: The multi-dimensional filter is implemented as a sequence of\n one-dimensional spline filters. The intermediate arrays are stored\n in the same data type as the output. Therefore, for output types\n with a limited precision, the results may be imprecise because\n intermediate results may be stored with insufficient precision.\n \"\"\"\n if order < 2 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n if order not in [0, 1] and input.ndim > 0:\n for axis in range(input.ndim):\n spline_filter1d(input, order, axis, output = output)\n input = output\n else:\n output[...] 
= input[...]\n return return_value\n\ndef geometric_transform(input, mapping, output_shape = None,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True,\n extra_arguments = (), extra_keywords = {}):\n \"\"\"Apply an arbritrary geometric transform.\n\n The given mapping function is used to find, for each point in the\n output, the corresponding coordinates in the input. The value of the\n input at those coordinates is determined by spline interpolation of\n the requested order.\n\n mapping must be a callable object that accepts a tuple of length\n equal to the output array rank and returns the corresponding input\n coordinates as a tuple of length equal to the input array\n rank. Points outside the boundaries of the input are filled\n according to the given mode ('constant', 'nearest', 'reflect' or\n 'wrap'). The output shape can optionally be given. If not given,\n it is equal to the input shape. The parameter prefilter determines\n if the input is pre-filtered before interpolation (necessary for\n spline interpolation of order > 1). If False it is assumed that\n the input is already filtered. The extra_arguments and\n extra_keywords arguments can be used to provide extra arguments\n and keywords that are passed to the mapping function at each call.\n\n Example\n -------\n >>> a = arange(12.).reshape((4,3))\n >>> def shift_func(output_coordinates):\n ... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)\n ...\n >>> print geometric_transform(a,shift_func)\n array([[ 0. , 0. , 0. ],\n [ 0. , 1.3625, 2.7375],\n [ 0. , 4.8125, 6.1875],\n [ 0. , 8.2625, 9.6375]])\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if output_shape is None:\n output_shape = input.shape\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n _nd_image.geometric_transform(filtered, mapping, None, None, None,\n output, order, mode, cval, extra_arguments, extra_keywords)\n return return_value\n\n\ndef map_coordinates(input, coordinates, output_type = None, output = None,\n order = 3, mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"\n Map the input array to new coordinates by interpolation.\n\n The array of coordinates is used to find, for each point in the output,\n the corresponding coordinates in the input. The value of the input at\n those coordinates is determined by spline interpolation of the\n requested order.\n\n The shape of the output is derived from that of the coordinate\n array by dropping the first axis. 
The values of the array along\n the first axis are the coordinates in the input array at which the\n output value is found.\n\n Parameters\n ----------\n input : ndarray\n The input array\n coordinates : array_like\n The coordinates at which `input` is evaluated.\n output_type : deprecated\n Use `output` instead.\n output : dtype, optional\n If the output has to have a certain type, specify the dtype.\n The default behavior is for the output to have the same type\n as `input`.\n order : int, optional\n The order of the spline interpolation, default is 3.\n The order has to be in the range 0-5.\n mode : str, optional\n Points outside the boundaries of the input are filled according\n to the given mode ('constant', 'nearest', 'reflect' or 'wrap').\n Default is 'constant'.\n cval : scalar, optional\n Value used for points outside the boundaries of the input if\n `mode='constant`. Default is 0.0\n prefilter : bool, optional\n The parameter prefilter determines if the input is\n pre-filtered with `spline_filter`_ before interpolation\n (necessary for spline interpolation of order > 1).\n If False, it is assumed that the input is already filtered.\n\n Returns\n -------\n return_value : ndarray\n The result of transforming the input. The shape of the\n output is derived from that of `coordinates` by dropping\n the first axis.\n\n\n See Also\n --------\n spline_filter, geometric_transform, scipy.interpolate\n\n Examples\n --------\n >>> import scipy.ndimage\n >>> a = np.arange(12.).reshape((4,3))\n >>> print a\n array([[ 0., 1., 2.],\n [ 3., 4., 5.],\n [ 6., 7., 8.],\n [ 9., 10., 11.]])\n >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)\n [ 2. 7.]\n\n Above, the interpolated value of a[0.5, 0.5] gives output[0], while\n a[2, 1] is output[1].\n\n >>> inds = np.array([[0.5, 2], [0.5, 4]])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)\n array([ 2. , -33.3])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')\n array([ 2., 8.])\n >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)\n array([ True, False], dtype=bool\n\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n coordinates = numpy.asarray(coordinates)\n if numpy.iscomplexobj(coordinates):\n raise TypeError('Complex type not supported')\n output_shape = coordinates.shape[1:]\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n if coordinates.shape[0] != input.ndim:\n raise RuntimeError('invalid shape for coordinate array')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n _nd_image.geometric_transform(filtered, None, coordinates, None, None,\n output, order, mode, cval, None, None)\n return return_value\n\n\ndef affine_transform(input, matrix, offset = 0.0, output_shape = None,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Apply an affine transformation.\n\n The given matrix and offset are used to find for each point in the\n output the corresponding coordinates in the input by an affine\n transformation. The value of the input at those coordinates is\n determined by spline interpolation of the requested order. 
Points\n outside the boundaries of the input are filled according to the given\n mode. The output shape can optionally be given. If not given it is\n equal to the input shape. The parameter prefilter determines if the\n input is pre-filtered before interpolation, if False it is assumed\n that the input is already filtered.\n\n The matrix must be two-dimensional or can also be given as a\n one-dimensional sequence or array. In the latter case, it is\n assumed that the matrix is diagonal. A more efficient algorithms\n is then applied that exploits the separability of the problem.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if output_shape is None:\n output_shape = input.shape\n if input.ndim < 1 or len(output_shape) < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n matrix = numpy.asarray(matrix, dtype = numpy.float64)\n if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:\n raise RuntimeError('no proper affine matrix provided')\n if matrix.shape[0] != input.ndim:\n raise RuntimeError('affine matrix has wrong number of rows')\n if matrix.ndim == 2 and matrix.shape[1] != output.ndim:\n raise RuntimeError('affine matrix has wrong number of columns')\n if not matrix.flags.contiguous:\n matrix = matrix.copy()\n offset = _ni_support._normalize_sequence(offset, input.ndim)\n offset = numpy.asarray(offset, dtype = numpy.float64)\n if offset.ndim != 1 or offset.shape[0] < 1:\n raise RuntimeError('no proper offset provided')\n if not offset.flags.contiguous:\n offset = offset.copy()\n if matrix.ndim == 1:\n _nd_image.zoom_shift(filtered, matrix, offset, output, order,\n mode, cval)\n else:\n _nd_image.geometric_transform(filtered, None, None, matrix, offset,\n output, order, mode, cval, None, None)\n return return_value\n\n\ndef shift(input, shift, output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Shift an array.\n\n The array is shifted using spline interpolation of the requested\n order. Points outside the boundaries of the input are filled according\n to the given mode. 
The parameter prefilter determines if the input is\n pre-filtered before interpolation, if False it is assumed that the\n input is already filtered.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if input.ndim < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n output, return_value = _ni_support._get_output(output, input,\n output_type)\n shift = _ni_support._normalize_sequence(shift, input.ndim)\n shift = [-ii for ii in shift]\n shift = numpy.asarray(shift, dtype = numpy.float64)\n if not shift.flags.contiguous:\n shift = shift.copy()\n _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)\n return return_value\n\n\ndef zoom(input, zoom, output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Zoom an array.\n\n The array is zoomed using spline interpolation of the requested order.\n Points outside the boundaries of the input are filled according to the\n given mode. The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n \"\"\"\n if order < 0 or order > 5:\n raise RuntimeError('spline order not supported')\n input = numpy.asarray(input)\n if numpy.iscomplexobj(input):\n raise TypeError('Complex type not supported')\n if input.ndim < 1:\n raise RuntimeError('input and output rank must be > 0')\n mode = _extend_mode_to_code(mode)\n if prefilter and order > 1:\n filtered = spline_filter(input, order, output = numpy.float64)\n else:\n filtered = input\n zoom = _ni_support._normalize_sequence(zoom, input.ndim)\n output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])\n zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n zoom = numpy.asarray(zoom, dtype = numpy.float64)\n zoom = numpy.ascontiguousarray(zoom)\n _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)\n return return_value\n\ndef _minmax(coor, minc, maxc):\n if coor[0] < minc[0]:\n minc[0] = coor[0]\n if coor[0] > maxc[0]:\n maxc[0] = coor[0]\n if coor[1] < minc[1]:\n minc[1] = coor[1]\n if coor[1] > maxc[1]:\n maxc[1] = coor[1]\n return minc, maxc\n\ndef rotate(input, angle, axes = (1, 0), reshape = True,\n output_type = None, output = None, order = 3,\n mode = 'constant', cval = 0.0, prefilter = True):\n \"\"\"Rotate an array.\n\n The array is rotated in the plane defined by the two axes given by the\n axes parameter using spline interpolation of the requested order. The\n angle is given in degrees. Points outside the boundaries of the input\n are filled according to the given mode. If reshape is true, the output\n shape is adapted so that the input array is contained completely in\n the output. 
The parameter prefilter determines if the input is pre-\n filtered before interpolation, if False it is assumed that the input\n is already filtered.\n \"\"\"\n input = numpy.asarray(input)\n axes = list(axes)\n rank = input.ndim\n if axes[0] < 0:\n axes[0] += rank\n if axes[1] < 0:\n axes[1] += rank\n if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:\n raise RuntimeError('invalid rotation plane specified')\n if axes[0] > axes[1]:\n axes = axes[1], axes[0]\n angle = numpy.pi / 180 * angle\n m11 = math.cos(angle)\n m12 = math.sin(angle)\n m21 = -math.sin(angle)\n m22 = math.cos(angle)\n matrix = numpy.array([[m11, m12],\n [m21, m22]], dtype = numpy.float64)\n iy = input.shape[axes[0]]\n ix = input.shape[axes[1]]\n if reshape:\n mtrx = numpy.array([[ m11, -m21],\n [-m12, m22]], dtype = numpy.float64)\n minc = [0, 0]\n maxc = [0, 0]\n coor = numpy.dot(mtrx, [0, ix])\n minc, maxc = _minmax(coor, minc, maxc)\n coor = numpy.dot(mtrx, [iy, 0])\n minc, maxc = _minmax(coor, minc, maxc)\n coor = numpy.dot(mtrx, [iy, ix])\n minc, maxc = _minmax(coor, minc, maxc)\n oy = int(maxc[0] - minc[0] + 0.5)\n ox = int(maxc[1] - minc[1] + 0.5)\n else:\n oy = input.shape[axes[0]]\n ox = input.shape[axes[1]]\n offset = numpy.zeros((2,), dtype = numpy.float64)\n offset[0] = float(oy) / 2.0 - 0.5\n offset[1] = float(ox) / 2.0 - 0.5\n offset = numpy.dot(matrix, offset)\n tmp = numpy.zeros((2,), dtype = numpy.float64)\n tmp[0] = float(iy) / 2.0 - 0.5\n tmp[1] = float(ix) / 2.0 - 0.5\n offset = tmp - offset\n output_shape = list(input.shape)\n output_shape[axes[0]] = oy\n output_shape[axes[1]] = ox\n output_shape = tuple(output_shape)\n output, return_value = _ni_support._get_output(output, input,\n output_type, shape = output_shape)\n if input.ndim <= 2:\n affine_transform(input, matrix, offset, output_shape, None, output,\n order, mode, cval, prefilter)\n else:\n coordinates = []\n size = numpy.product(input.shape,axis=0)\n size /= input.shape[axes[0]]\n size /= input.shape[axes[1]]\n for ii in range(input.ndim):\n if ii not in axes:\n coordinates.append(0)\n else:\n coordinates.append(slice(None, None, None))\n iter_axes = list(range(input.ndim))\n iter_axes.reverse()\n iter_axes.remove(axes[0])\n iter_axes.remove(axes[1])\n os = (output_shape[axes[0]], output_shape[axes[1]])\n for ii in range(size):\n ia = input[tuple(coordinates)]\n oa = output[tuple(coordinates)]\n affine_transform(ia, matrix, offset, os, None, oa, order, mode,\n cval, prefilter)\n for jj in iter_axes:\n if coordinates[jj] < input.shape[jj] - 1:\n coordinates[jj] += 1\n break\n else:\n coordinates[jj] = 0\n return return_value\n",
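The interpolation module reproduced above is Peter Verveer's ndimage interpolation code (the same functions ship as scipy.ndimage), so its calling conventions match the upstream scipy.ndimage API. The following is a minimal, non-authoritative usage sketch; it is run against scipy.ndimage rather than the kapteyn-internal copy (whose import path is not shown in this file), and the expected values are the ones quoted in the map_coordinates docstring above.

import numpy as np
from scipy import ndimage

a = np.arange(12.).reshape((4, 3))

# Bilinear (order=1) interpolation at fractional coordinates:
# output[0] samples a[0.5, 0.5], output[1] samples a[2, 1].
print(ndimage.map_coordinates(a, [[0.5, 2.0], [0.5, 1.0]], order=1))  # [ 2.  7.]

# Shift by half a pixel along each axis; points that fall outside the
# input are filled according to `mode` (here constant, cval=0.0).
print(ndimage.shift(a, (0.5, 0.5), order=1, mode='constant', cval=0.0))

# Rotate 30 degrees in the plane of the two given axes; reshape=True
# enlarges the output so the rotated input fits completely.
print(ndimage.rotate(a, 30.0, axes=(1, 0), reshape=True, order=1).shape)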
"from kapteyn import maputils\nfrom matplotlib import pyplot as plt\n\nf = maputils.FITSimage(\"m101.fits\")\nf.set_limits(pxlim=(50,440), pylim=(50,450))\n\nfig = plt.figure(figsize=(8,5.5))\nframe = fig.add_axes((0.05, 0.1, 0.8, 0.7))\nfig.text(0.5, 0.96, \"Combination of plot objects\", \n horizontalalignment='center',\n fontsize=14, color='r')\n\nannim = f.Annotatedimage(frame, clipmin=3000, clipmax=15000)\ncont = annim.Contours(levels=list(range(8000,14000,1000)))\ncont.setp_contour(linewidth=1)\ncont.setp_contour(levels=11000, color='g', linewidth=2)\ncb = annim.Colorbar(clines=True, orientation='vertical', fontsize=8, linewidths=5)\ngr = annim.Graticule()\ngr.setp_ticklabel(wcsaxis=0, fmt='HMS')\nilab = gr.Insidelabels(color='b', ha='left')\nilab.setp_label(position='14h03m0s', fontsize=15) \n\n# Plot a second graticule for the galactic sky system\ngr2 = annim.Graticule(deltax=7.5/60, deltay=5.0/60,\n skyout=\"galactic\", \n visible=True)\ngr2.setp_axislabel(plotaxis=(\"top\",\"right\"), label=\"Galactic l,b\",\n color='g', visible=True)\ngr2.setp_axislabel(plotaxis=(\"left\",\"bottom\"), visible=False)\ngr2.set_tickmode(plotaxis=(\"top\",\"right\"), mode=\"Native\")\ngr2.set_tickmode(plotaxis=(\"left\",\"bottom\"), mode=\"NO\")\ngr2.setp_ticklabel(wcsaxis=(0,1), color='g')\ngr2.setp_ticklabel(plotaxis='right', fmt='DMs')\ngr2.setp_tickmark(plotaxis='right', markersize=8, markeredgewidth=2)\ngr2.setp_gratline(wcsaxis=(0,1), color='g')\nannim.Ruler(x1=120, y1=100, x2=120, y2=330, step=1/60.0)\nr1 = annim.Ruler(pos1='ga 102d0m, 59d50m', pos2='ga 102d7m30s, 59d50m', \n world=True, step=1/60.0)\nr1.setp_line(color='#ff22ff', lw=6)\nr1.setp_label(color='m')\nannim.Pixellabels(plotaxis='top', va='top')\npl = annim.Pixellabels(plotaxis='right')\npl.setp_marker(color='c', markersize=10)\npl.setp_label(color='m')\n\nannim.plot()\nplt.show()\n",
"#!/usr/bin/env python\n#----------------------------------------------------------------------\n# FILE: celestial.py\n# PURPOSE: Build a matrix for conversions between sky systems and or\n# celestial reference systems and epochs.\n# In the calling environment one is supposed to use the function \n# 'skymatrix' only. The other functions are helper functions.\n# AUTHOR: M.G.R. Vogelaar, University of Groningen, The Netherlands\n# DATE: December 12, 2007\n# UPDATE: April 17, 2008\n# June 29, 2009: Changed docstrings for Sphinx\n# VERSION: 1.0\n#\n# (C) University of Groningen\n# Kapteyn Astronomical Institute\n# Groningen, The Netherlands\n# E: [email protected]\n#----------------------------------------------------------------------\n\"\"\"\nModule Celestial\n================\n\nThis document describes functions from the Python module *celestial*\n(celestial.py) which provides a programmer with a basic set of\nroutines to transform a world coordinate in a given sky system\ninto a world coordinate of another system assuming\nzero proper motion, parallax, and recessional velocity.\n\nThe most important function\nbuilds a matrix for conversions of positions between sky systems,\ncelestial reference systems and epochs of the equinox.\nThis function is called :func:`skymatrix` and it can be used in the following\ncontexts:\n\n * Implicit, in module *wcs*, using the *Transformation* class as in::\n\n world_eq = (192.25, 27.4) # FK4 coordinates of galactic pole\n tran = wcs.Transformation(\"equatorial fk4_no_e B1950.0\", \"galactic\")\n print tran(world_eq)\n\n * As stand alone utility in scripts or in an interactive Python\n session. Usually one uses function :func:`sky2sky` to transform\n longitudes and latitudes::\n\n M = celestial.sky2sky( (celestial.eq, celestial.fk5), celestial.gal,\n (0,0,1.0), (10,20,20) )\n\n * Hidden in the *topixel()* and *toworld()* methods in module *wcs*.\n There the sky system is read from a (FITS) header and the \n sky system for which we want the transformed coordinates\n is set with attribute *skyout* of the projection object. \n \n.. index::\n single: Tutorial; Celestial\n module: celestial\n \n.. seealso:: Tutorial material:\n \n * :doc:`celestialbackground` which contains many examples with source code.\n\n\n.. _celestial-skydefinitions:\n \nSky definitions\n---------------\n\nA sky definition can consist of a *sky system*,\na *reference system*, an *equinox* and an *epoch of\nobservation*. It is either a string or it is a tuple with one or more elements.\nIt can also be a single element.\nThe elements in a tuple representing a sky- or reference system are symbols\nfrom the table below. For a string, the parts of the string representing a\nsky- or reference system are minimal matched against the strings in the table below.\nThe match is case insensitive.\n\n\n.. 
_celestial-skysystems:\n\nSky systems\n...........\n\n======================= ============= =============================================\nSymbol String Description\n======================= ============= =============================================\n*eq*, *equatorial* EQUATORIAL Equatorial coordinates (\\u03B1, \\u03B4),\n See also next table with reference systems\n*ecl*, *ecliptic* ECLIPTIC Ecliptic coordinates (\\u03BB, \\u03B2)\n referred to the ecliptic and mean equinox\n*gal*, *galactic* GALACTIC Galactic coordinates (lII, bII)\n*sgal*, *supergalactic* SUPERGALACTIC De Vaucouleurs Supergalactic\n coordinates (sgl, sgb)\n======================= ============= =============================================\n\n\n.. _celestial-refsystems:\n \nReference systems\n.................\n\n.. tabularcolumns:: |p{20mm}|p{20mm}|p{110mm}|\n\n======================= ============= =============================================\nSymbol String Description\n======================= ============= =============================================\n*fk4* FK4 Mean place pre-IAU 1976 system. FK4 is the\n old barycentric (i.e. w.r.t. the common\n center of mass) equatorial coordinate\n system, which should be qualified by an\n Equinox value.\n For accurate work FK4\n coordinate systems should also be qualified\n by an Epoch value. This is the *epoch of\n observation*.\n*fk4_no_e* FK4_NO_E, The old FK4 (barycentric) equatorial system\n FK4-NO-E but without the *E-terms of aberration*.\n This coordinate system should also be\n qualified by both an Equinox and an Epoch\n value.\n*fk5* FK5 Mean place post IAU 1976 system.\n Also a barycentric equatorial coordinate\n system.\n This should be qualified by an\n Equinox value (only).\n*icrs* ICRS The International Celestial Reference\n System, for optical data realized through\n the Hipparcos catalog.\n By definition, ICRS\n is not an equatorial system, but it is\n very close to the FK5 (J2000) system.\n No Equinox value is required.\n*j2000*, *dynj2000* DYNJ2000 This is an equatorial coordinate system\n based on the mean dynamical equator and\n equinox at epoch J2000.\n The dynamical equator and equinox differ\n slightly compared to the equator and equinox\n of FK5 at J2000 and the ICRS system.\n This system need not be qualified by an\n Equinox value\n======================= ============= =============================================\n\n\n.. note::\n Reference systems are stored in FITS headers under keyword *RADESYS=*.\n\n.. note::\n Standard in FITS: RADESYS defaults to IRCS unless EQUINOX is given alone,\n in which case it defaults to FK4 prior to 1984 and FK5 after 1984.\n\n EQUINOX defaults to 2000 unless RADESYS is FK4, in which case it defaults\n to 1950.\n\n.. note::\n In routines dealing with sky definitions tne names are minimal matched against\n a list with full names.\n \n.. _celestial-epochs:\n \nEpochs for the equinox and epoch of observation\n...............................................\n\nAn epoch can be set in various ways. The options are distinguished\nby a prefix. Only the 'B' and 'J' epochs can be negative.\n\n.. tabularcolumns:: |p{15mm}|p{135mm}|\n\n====== ===============================================================\nPrefix Epoch\n====== ===============================================================\nB Besselian epoch.\n Example: ``'B 1950'``, ``'b1950'``, ``'B1983.5'``, ``'-B1100'``\nJ Julian epoch.\n Example: ``'j2000.7'``, ``'J 2000'``, ``'-j100.0'``\nJD Julian date. 
This number of days (with decimals)\n that have elapsed since the initial epoch defined\n as noon Universal Time (UT) Monday, January 1, 4713 BC\n in the proleptic Julian calendar\n Example: ``'JD2450123.7'``\nMJD The Modified Julian Day (MJD) is the number of days\n that have elapsed since midnight at the beginning of\n Wednesday November 17, 1858. In terms of the Julian day:\n MJD = JD - 2400000.5\n Example: ``'mJD 24034'``, ``'MJD50123.2'``\nRJD The Reduced Julian Day (RJD): Julian date counted from\n nearly the same day as the MJD,\n but lacks the additional offset of 12 hours that MJD has.\n It therefore starts from the previous noon UT or TT,\n on Tuesday November 16, 1858. It is defined as:\n RJD = JD - 2400000\n Example: ``'rJD50123.2'``, ``'Rjd 23433'``\nF Various FITS formats:\n\n * DD/MM/YY Old FITS format.\n Example: ``'F29/11/57'``\n * YYYY-MM-DD FITS format.\n Example: ``'F2000-01-01'``\n * YYYY-MM-DDTHH:MM:SS FITS format with date and time.\n Example: ``'F2002-04-04T09:42:42.1'``\n\n====== ===============================================================\n\n**Epoch of observation**.\n\nReference system FK4 is not an inertial system. It is slowly rotating\nand positions are further away from the true mean places if the date of observation\nis greater than B1950. FK5 is an inertial system. If we convert coordinates\nfrom FK4 to FK5, the accuracy of the FK5 position can be improved\nif we know the date of the observation. So in all transformations where\na conversion between FK4 and FK5 is involved, an epoch of observation can\nbe part of the sky definition. Note that this also involves a conversion between\ngalactic coordinates and equatorial, FK5 coordinates because that conversion\nis done in steps and one step involves FK4.\n\nTo be able to distinguish an equinox from an epoch of observation, an epoch of\nobservation is followed by an underscore character and some arbitrary characters\nto indicate that it is a special epoch (e.q. \"B1960_OBS\"). Only the underscore is\nobligatory.\n\n.. note::\n If a sky definition is entered as a string, there cannot be a space\n between the prefix and the epoch, because a space is a separator\n for the parser in :func:`celestial.skyparser`. \n\n.. note::\n An *epoch of observation* is either the second epoch in your input or\n or the epoch string has a suffix '_' which may be followed by arbitrary\n characters (e.g. \"B1963.5_OBS\").\n\nInput Examples\n..............\n\n.. tabularcolumns:: |p{35mm}|p{25mm}|p{90mm}|\n\n========================== ========================== =====================================\nInput string Description Remarks\n========================== ========================== =====================================\n\"eq\" Equatorial, ICRS ICRS because no reference system\n and no equinox is given. \n\"Eclip\" Ecliptic, ICRS Ecliptic coordinates\n\"ecl fk5\" Ecliptic, FK5 Ecliptic coordinates with a non\n default reference system\n\"GALACtic\" Galactic II Minimal match is case insensitive\n\"s\" Supergalactic Shortest string to identify system.\n\"fk4\" Equatorial, FK4 Only a reference system is entered.\n Sky system is assumed to be\n equatorial\n\"B1960\" Equatorial, FK4 Only an equinox is given. This is\n a date before 1984 so FK4 is\n assumed. 
Therefore the sky system\n is equatorial\n\"EQ, fk4_no_e, B1960\" Equatorial, FK4 no e-terms Sky system, reference system,\n and an equinox\n\"EQ, fk4-no-e, B1960\" Equatorial, FK4 no e-terms Same as above but underscores\n replaced by hyphens.\n\"fk4,J1983.5_OBS\" Equatorial, FK4 + epobs FK4 with an epoch of observation.\n Note that only the underscore\n is important.\n\"J1983.5_OBS\" Equatorial, FK4 + epobs Only a date of observation. Then\n reference system FK4 is assumed.\n\"EQ,fk4,B1960, B1983.5_O\" Equatorial, FK4 + epobs A complete description of an\n equatorial system.\n\"B1983.5_O fk4 B1960,eq\" Equatorial, FK4 + epobs The same as above, showing that\n the order of the elements are\n unimportant.\n========================== ========================== =====================================\n\nCode examples\n.............\n\nTo show that one can use both the tuple and the string representation of a system,\nwe use both for the same system and compare a transformed position.\nThe result should be 0 for both coordinates.\n\n>>> world_eq = numpy.array([192.25, 27.4]) # FK4 coordinates of galactic pole\n>>> tran1 = wcs.Transformation(\"equatorial fk4_no_e B1950.0\", \"galactic\")\n>>> tran2 = wcs.Transformation((wcs.equatorial, wcs.fk4_no_e, 'B1950.0'), wcs.galactic)\n>>> print tran1(world_eq)-tran2(world_eq)\n[ 0. 0.]\n\n\nModule level data\n-----------------\n\n\n:data:`skyrefsystems`\n An object from class :class:`skyrefset` which is a container\n with a list with systems and two dictionaries with systems.\n\n >>> for s in skyrefsystems.skyrefs_list:\n >>> print s.fullname, s.description, s.idnum\n\nFor programmers who need to access the id's of the sky and reference systems: \nExternal modules can set their own variables.\nHere are some examples how one can do this.\n\nExample with copy of celestial's variables:\n \n * ``eq = celestial.eq``\n * ``ec = celestial.ecl``\n * ``ga = celestial.gal`` etc.\n\nExample with minimal match:\n \n * ``eq = celestial.skyrefsystems.minmatch2skyref('EQUA')[0].idnum``\n * ``ec = celestial.skyrefsystems.minmatch2skyref('ecli')[0].idnum``\n\nRead this as: get the object for which a minimal match\nis found. Item [0] is the object (the other is the number of times\na match is found). The 'idnum' is the integer for which we can\nidentify a system.\n\nOr use the equivalent with method :meth:`skyrefset.minmatch2id`:\n \n * ``eq = celestial.skyrefsystems.minmatch2id('EQUA')``\n * ``ec = celestial.skyrefsystems.minmatch2id('ecli')``\n\nExample with full name (case sensitive!):\n \n * ``eq = celestial.skyrefsystems.fullname2id('EQUATORIAL')``\n * ``ec = celestial.skyrefsystems.fullname2id('ECLIPTIC')``\n\n\n\nClasses\n-------\n\n.. autoclass:: skyrefsys\n.. autoclass:: skyrefset\n\n\nCore Functions\n--------------\n\n.. index:: Input syntax for sky definitions\n.. autofunction:: skyparser\n.. autofunction:: skymatrix\n.. autofunction:: sky2sky\n.. index:: Epoch conversions\n.. autofunction:: epochs\n\nUtility functions\n-----------------\n\n.. index:: Julian day number\n.. autofunction:: JD\n.. index:: Label formatting\n.. autofunction:: lon2hms\n.. autofunction:: lat2dms\n.. autofunction:: lon2dms\n.. index:: Besselian epochs\n.. autofunction:: JD2epochBessel\n.. autofunction:: epochBessel2JD\n.. index:: Julian epochs\n.. autofunction:: JD2epochJulian\n.. autofunction:: epochJulian2JD\n.. index:: Obliquity\n.. autofunction:: obliquity1980\n.. autofunction:: obliquity2000\n.. index:: Precession angles\n.. autofunction:: IAU2006precangles\n.. 
autofunction:: Lieskeprecangles\n.. autofunction:: Newcombprecangles\n\n\n.. index:: Rotation matrices\n\nRotation matrices\n-----------------\n\n.. autofunction:: MatrixEqB19502Gal\n.. autofunction:: MatrixGal2Sgal\n.. autofunction:: MatrixEq2Ecl\n.. autofunction:: FK42FK5Matrix\n.. autofunction:: ICRS2FK5Matrix\n.. autofunction:: ICRS2J2000Matrix\n.. autofunction:: JMatrixEpoch12Epoch2\n.. autofunction:: BMatrixEpoch12Epoch2\n.. autofunction:: IAU2006MatrixEpoch12Epoch2\n.. autofunction:: MatrixEpoch12Epoch2\n\n.. index:: Elliptic terms of aberration\n\nFunctions related to E-terms\n----------------------------\n\n.. autofunction:: getEterms\n.. autofunction:: addEterms\n.. autofunction:: removeEterms\n\n\"\"\"\nimport numpy as n\nimport types\nfrom re import split as re_split\nimport six\n\n\nclass skyrefsys(object):\n#----------------------------------------------------------------------\n \"\"\"\nClass creates an object that describes a sky- or reference system.\nThis module initializes a set of systems. They are accessible\nthrough methods in class :class:`celestial.skyrefset`\n\n:param fullname:\n Complete name to identify the system, e.g. *\"EQUATORIAL\"*\n:type fullname:\n String\n:param idnum:\n A unique integer to identify the system\n:type idnum:\n Integer\n:param description:\n A short description of the system\n:type description:\n String\n:param refsystem:\n Is this system a reference system?\n:type refsystem:\n Boolean\n\n\n**Attributes:**\n \n.. attribute:: fullname\n\n A string to identify a system, e.g. \"EQUATORIAL\".\n \n.. attribute:: idnum\n\n A unique integer to identify the system.\n\n.. attribute:: description\n\n A string to describe the system.\n \n.. attribute:: refsystem\n\n If *True* then this system is a reference system.\n Else it is a sky system.\n \n \"\"\"\n#----------------------------------------------------------------------\n def __init__(self, fullname, idnum, description, refsystem):\n self.fullname = fullname\n self.idnum = idnum\n self.description = description\n self.refsystem = refsystem # Boolean\n\n\n\nclass skyrefset(object):\n#----------------------------------------------------------------------\n \"\"\"\nA container with sky- and reference system objects from class\n:class:`celestial.skyrefsys`. It is used to initialize variables\nthat can be used as identifiers for sky- or reference systems.\nApplications can use its methods to retrieve information given\nan integer identifier or (part of) a string.\n\nFor example when we want a list with all the supported systems\nthen type: \n\n>>> for s in skyrefsystems.skyrefs_list:\n>>> print s.fullname, s.description, s.idnum\n\n.. automethod:: append\n.. automethod:: minmatch2skyref\n.. automethod:: minmatch2id\n.. automethod:: fullname2id\n.. automethod:: id2skyref\n.. automethod:: id2fullname\n.. automethod:: id2description\n\n**Attributes:**\n\n .. attribute:: skyrefs_list\n \n The list with systems\n \n .. attribute:: skyrefs_id\n \n A dictionary with the systems and with id's as keys\n \n .. 
attribute:: skyrefs_fullname\n \n A dictionary with the systems and with full names as keys\n\n:Examples: Next short script shows how to get a list with\n sky systems and how to use methods of this class to get data for\n a system if an (integer) id is found:: \n\n from kapteyn.celestial import skyrefsystems\n \n for s in skyrefsystems.skyrefs_list:\n print s.fullname, s.description, s.idnum\n i = s.idnum\n print \"Full name using id2fullname:\", skyrefsystems.id2fullname(i)\n print \"Description using id2description:\", skyrefsystems.id2description(i)\n print \"id of %s with minimal match: %d\" % \\\\\n (s.fullname[:3], skyrefsystems.minmatch2skyref(s.fullname[:3])[0].idnum)\n print \"id of %s with minimal match, alternative: %d\" % \\\\\n (s.fullname[:3], skyrefsystems.minmatch2id(s.fullname[:3]))\n print \"id of %s with full name: %d\" % \\\\\n (s.fullname[:3], skyrefsystems.fullname2id(s.fullname))\n\n \"\"\"\n#----------------------------------------------------------------------\n def __init__(self):\n self.skyrefs_list = [] # The list with systems\n self.skyrefs_id = {} # A dict. version with id's as keys\n self.skyrefs_fullname = {} # A dict. version with names as keys\n \n def append(self, skyrefsys):\n \"\"\"\n :param skyrefsys:\n Append this system to the list with supported systems\n :type skyrefsys:\n Instance of class :class:`skyrefsys`\n\n :Returns:\n A unique integer id which can be used to identify a system.\n \"\"\"\n self.skyrefs_list.append(skyrefsys)\n self.skyrefs_id[skyrefsys.idnum] = skyrefsys\n self.skyrefs_fullname[skyrefsys.fullname] = skyrefsys.idnum\n return skyrefsys.idnum\n \n def minmatch2skyref(self, s):\n \"\"\"\n Return the relevant skyrefsys object with the number of times\n it is matched or return None if nothing was found.\n\n :param s:\n Part of the string name of a system\n :type s:\n String\n\n :Returns:\n Instance of class :class:`skyrefsys` and the number of times\n that the input string gives a match.\n \"\"\"\n s = s.upper()\n if s.startswith(\"FK4\"): # Allow also FK4-NO-E. Replace hyphen by underscore\n s = s.replace('-','_')\n found = 0\n found_sk = None\n for sk in self.skyrefs_list:\n foundone = False\n if s == sk.fullname:\n found += 1\n found_sk = sk\n return found_sk, found # Exact match !\n else:\n i = sk.fullname.find(s, 0, len(s))\n if i == 0:\n found += 1\n found_sk = sk\n return found_sk, found\n \n def minmatch2id(self, s):\n \"\"\"\n From the found skyrefsys object corresponding to string *s*,\n return the idnum attribute. 
Case insensitive minimal match\n is used to find the sky- or reference system.\n Return None if there was no match or more than one match.\n\n :param s:\n Part of the string name of a system\n :type s:\n String\n\n :Returns:\n Instance of class :class:`skyrefsys` or None if there was not\n a match or more than one match.\n \"\"\"\n s = s.upper()\n found = 0\n found_sk = None\n for sk in self.skyrefs_list:\n foundone = False\n if s == sk.fullname:\n found += 1\n found_sk = sk\n return found_sk.idnum # Exact match !\n else:\n i = sk.fullname.find(s, 0, len(s))\n if i == 0:\n found += 1\n found_sk = sk\n if found == 1:\n return found_sk.idnum\n return None\n \n def fullname2id(self, fullname):\n \"\"\"\n This is the fastest method to get an integer id from a\n string which represents a sky system or a reference system.\n Note that the routine is case sensitive because it uses\n the full names as keys in a dictionary.\n The parameter *fullname* therefore must be in in capitals!\n\n :param fullname:\n The full descriptive name of a system e.g. \"EQUATORIAL\"\n :type fullname:\n String\n\n :Returns:\n Integer id of the found system or *None* if nothing was found.\n \"\"\"\n try:\n idnum = self.skyrefs_fullname[fullname]\n except:\n idnum = None\n return idnum\n \n def id2skyref(self, idnum):\n \"\"\"\n Given an integer id of a system, return the corresponding system\n as an instance of class :class:`skyrefsys`.\n Usually the calling environment will deal with the attributes of\n this object, for instance to write a short description of the system.\n\n :param idnum:\n Integer id of a system\n :type idnum:\n Integer\n\n :Returns:\n Instance of class :class:`skyrefsys` or None if there was not\n a corresponding system.\n \"\"\"\n try:\n sys = self.skyrefs_id[idnum]\n except:\n sys = None\n return sys\n \n def id2fullname(self, idnum):\n \"\"\"\n Given an integer id of a system, return the full name\n of the corresponding system.\n\n :param idnum:\n Integer id of a system\n :type idnum:\n Integer\n\n :Returns:\n Full name (e.g. 
\"EQUATORIAL\") of the \n corresponding system or an empty string if nothing was found.\n \"\"\"\n try:\n fullname = self.skyrefs_id[idnum].fullname\n except:\n fullname = ''\n return fullname\n \n def id2description(self, idnum):\n \"\"\"\n Given an integer id of a system, return the description\n of the corresponding system.\n\n :param idnum:\n Integer id of a system\n :type idnum:\n Integer\n\n :Returns:\n A short description of the \n corresponding system or an empty string if nothing was found.\n \"\"\"\n try:\n descr = self.skyrefs_id[idnum].description\n except:\n descr = ''\n return descr\n\n\n# Create a collection of sky systems and reference systems.\n# The integer is an identifier and the last parameter tells you\n# whether the system is a reference system or not.\n# Also a set of global integer variable is created to facilitate\n# parsers in this module.\nskyrefsystems = skyrefset()\neq = skyrefsystems.append(skyrefsys('EQUATORIAL', 0, \"Equatorial\", False))\necl = skyrefsystems.append(skyrefsys('ECLIPTIC', 1, \"Ecliptic\", False))\ngal = skyrefsystems.append(skyrefsys('GALACTIC', 2, \"Galactic II\", False))\nsgal = skyrefsystems.append(skyrefsys('SUPERGALACTIC', 3, \"Supergalactic\", False))\nfk4 = skyrefsystems.append(skyrefsys('FK4', 4, \"Fourth Fundamental Catalogue\", True))\nfk4_no_e = skyrefsystems.append(skyrefsys('FK4_NO_E', 5, \"FK4 without E-terms\", True))\nfk5 = skyrefsystems.append(skyrefsys('FK5', 6, \"Fifth Fundamental Catalogue \", True))\nicrs = skyrefsystems.append(skyrefsys('ICRS', 7, \"International Celestial Reference System\", True))\nj2000 = skyrefsystems.append(skyrefsys('DYNJ2000', 8, \"Dynamic J2000\", True))\n\n# Some aliases\nequatorial = eq; ecliptic = ecl; galactic = gal; supergalactic = sgal; dynj2000 = j2000 \n\n\n#for s in skyrefsystems.skyrefs_list:\n# print s.fullname, s.description, s.idnum\n# Tests:\n# print \"EQ, EC:\", eq, ecl, gal, sgal, fk4, fk4_no_e, fk5, icrs, j2000\n#\n#for i in range(10):\n# s = skyrefsystems.id2skyref(i)\n# if s != None:\n# print s.fullname, s.description, s.idnum\n# print \"Full name using id2fullname:\", skyrefsystems.id2fullname(i)\n# print \"Description using id2description:\", skyrefsystems.id2description(i)\n# print \"id of %s with minimal match: %d\" % (s.fullname[:3], skyrefsystems.minmatch2skyref(s.fullname[:3])[0].idnum)\n# print \"id of %s with full name: %d\" % (s.fullname[:3], skyrefsystems.fullname2id(s.fullname))\n# print \"id of %s with minimal match 2: %d\" % (s.fullname[:3], skyrefsystems.minmatch2id(s.fullname[:3]))\n\n\n\n# Conversion factors deg <-> rad\nconvd2r = n.pi/180.0\nconvr2d = 180.0/n.pi\n\n\n#----------------------------------------------------------------------\n# Some utility routines\n#---------------------------------------------------------------------\ndef d2r(degs):\n return degs * convd2r\n\ndef r2d(rads):\n return rads * convr2d\n\ndef I():\n return n.identity(3, dtype='d')\n\n\ndef JD(year, month, day):\n#----------------------------------------------------------------------\n \"\"\"\nCalculate Julian day number (Julian date)\n\n:param year:\n Year (nnnn)\n:type year:\n Integer\n:param month:\n Month (nn)\n:type month:\n Integer\n:param day:\n Day (nn.n...)\n:type day:\n Floating point number\n \n \n:Returns:\n Julian day number *jd*.\n\n:Reference:\n Meeus, Astronomical formula for Calculators, 2nd ed, 1982\n\n:Notes:\n Months start at 1. Days start at 1. The Julian day begins at\n Greenwich mean noon, i.e. at 12h. 
So Jan 1, 1984 at 0h is\n entered as *JD(1984,1,1)* and Jan 1, 1984 at 12h is entered\n as *JD(1984,1,1.5)*\n\n There is a jump at *JD(1582,10,15)* caused by a change of\n calendars. For dates after 1582-10-15 one enters a date\n from the Julian calendar and before this date you enter a\n date from the Gregorian calendar.\n\n:Examples:\n * Julian date of JD reference:\n ``print celestial.JD(-4712,1,1.5) ==> 0.0``\n * The first day of 1 B.C.:\n ``print celestial.JD(0,1,1) ==> 1721057.5``\n * Last day before Gregorian reform:\n ``print celestial.JD(1582,10,4) ==> 2299159.5``\n * First day of Gregorian reform:\n ``print celestial.JD(1582,10,15) ==> 2299170.5``\n * Half a day later:\n ``print celestial.JD(1582,10,15.5) ==> 2299161.0``\n * Unix reference:\n ``print celestial.JD(1970,1,1) ==> 2440587.5``\n\n \"\"\"\n#---------------------------------------------------------------------- \n if (month > 2):\n y = year\n m = month\n elif (month == 1 or month == 2):\n y = year - 1\n m = month + 12\n\n calday = year + month/100.0 + day / 10000.0\n\n if (calday > 1582.1015):\n A = int(y/100.0)\n B = 2 - A + int(A/4.0)\n else:\n B = 0\n\n if (calday > 0.0229): # Dates after 29 February year 0\n jd = int(365.25*y) + int(30.6001*(m+1)) + day + 1720994.50 + B\n else:\n jd = int(365.25*y-0.75) + int(30.6001*(m+1)) + day + 1720994.50 + B\n\n return jd\n\n\n\ndef longlat2xyz(longlat):\n \"\"\"\n-----------------------------------------------------------------------\nPurpose: Given two angles in longitude and latitude return \n corresponding Cartesian coordinates x,y,z\nInput: Sequence of positions e.g. ((a1,d1),(a2,d2), ...)\nReturns: Corresponding values of x,y,z in same order as input\nReference: -\nNotes: The three coordinate axes x, y and z, the set of \n right-handed Cartesian axes that correspond to the\n usual celestial spherical coordinate system. \n The xy-plane is the equator, the z-axis \n points toward the north celestial pole, and the \n x-axis points toward the origin of right ascension. \n-----------------------------------------------------------------------\n \"\"\"\n lon = d2r( n.asarray(longlat[:,0],'d').flatten(1) )\n lat = d2r( n.asarray(longlat[:,1],'d').flatten(1) )\n x = n.cos(lon)*n.cos(lat)\n y = n.sin(lon)*n.cos(lat)\n z = n.sin(lat)\n return n.mat((x,y,z))\n\n\n\ndef xyz2longlat(xyz):\n \"\"\"\n-----------------------------------------------------------------------\nPurpose: Given Cartesian x,y,z return corresponding longitude and \n latitude in degrees.\nInput: Sequence of tuples with values for x,y,z\nReturns: The same number of positions (longitude, latitude and in the\n same order as the input.\nReference: -\nNotes: Note that one can expect strange behavior for the values \n of the longitudes very close to the pole. 
In fact, at the \n poles itself, the longitudes are meaningless.\n-----------------------------------------------------------------------\n \"\"\"\n x = n.asarray(xyz[0],'d').flatten(1)\n y = n.asarray(xyz[1],'d').flatten(1)\n z = n.asarray(xyz[2],'d').flatten(1)\n\n lat = r2d( n.arctan2(z, n.sqrt(x*x+y*y)) )\n lon = r2d( n.arctan2(y, x) )\n# eps = n.array(0.00000001, 'd')\n# lon = n.where( ((abs(lat) > 89.9999) & (abs(x) < eps) & (abs(y) < eps)),\\\n# 0.0, r2d( n.arctan2(y, x)))\n lon = n.where(lon < 0.0, lon+360.0, lon)\n return n.mat([lon,lat]).T\n\n\n\n\ndef lon2hms(a, prec=1, delta=None, tex=False):\n#----------------------------------------------------------------------\n \"\"\"\nConvert an angle in degrees to **hours, minutes, seconds** format.\n\n:param a:\n Angle (in degrees) for which we want to create a formatted text label.\n:type a:\n Floating point number\n:param prec:\n The required number of decimals in the seconds part of output.\n If a value is omitted, then the default is 1.\n:type prec:\n Integer\n:param delta:\n If one labels world coordinates along an axis then the default labels\n are in hours, minutes and seconds with some decimal number. This is probably\n not want you want if the step size between subsequent positions is\n for example an integer number of degrees or minutes.\n Then you want labels showing only hours or hours and minutes.\n This function tries to find out whether this is the case (given a value\n for *delta*) or not. If so, a minimum length label is returned.\n:type delta:\n *None* or a floating point number\n:param tex:\n The default is *False*. If set to *True*, the string is formatted\n in LaTeX. Such labels can be plotted in, for example, Matplotlib.\n:type tex:\n Boolean\n\n:Returns:\n Formatted string representing the input angle.\n \n:Notes:\n Longitudes are forced into the range, 360 deg. and then\n converted to hours, minutes and seconds.\n\n:Examples:\n Format a position in hms and dms:\n\n >>> ra = 359.9999\n >>> dec = 0.0000123\n >>> print celestial.lon2hms(ra), celestial.lat2dms(dec)\n 00h 00m 0.0s +00d 00m 0.0s\n >>> print celestial.lon2hms(ra, 2), celestial.lat2dms(dec, 2)\n 23h 59m 59.98s +00d 00m 0.04s\n >>> print celestial.lon2hms(ra, 4), celestial.lat2dms(dec, 4)\n 23h 59m 59.9760s +00d 00m 0.0443s\n \"\"\"\n#----------------------------------------------------------------------\n degs = n.fmod(a, 360.0) # Now in range -360, 360\n if degs < 0.0:\n degs += 360.0\t\n if prec < 0:\n prec = 0\n # How many seconds is this. 
Round to 'prec'\n sec = n.round(degs*240.0, prec)\n sec = n.fmod(sec, 360.0*240.0) # Rounding can result in 360 deg again, so correct\n Isec = n.int(sec) # Integer seconds\n Fsec = sec - Isec # Fractional remainder\n hours = Isec / 3600.0\n Ihours = n.int(hours)\n secleft = Isec - Ihours*3600.0\n Imin = int(secleft / 60.0)\n secleft = secleft - Imin*60.0\n # print \"\\n prec Ideg, Imin, secleft, Fsec\", prec, Ideg, Imin, secleft, Fsec\n if tex:\n if prec > 0:\n hms = \"%d^h%.2d^m%.2d^s\" % (Ihours, Imin, secleft)\n fsec = \"%*.*d\" % (prec, prec, int(round(Fsec*10.0**prec,0)))\n s = r\"$\" + hms + fsec + \"$\"\n else:\n s1 = r\"$%d^h%.2d^m%.2d^s$\" % (Ihours, Imin, secleft)\n if delta == None:\n s = s1\n else:\n if (delta*3600.0) % (15.0*3600) == 0.0: # Only hours\n s = r\"$%d^h$\" % Ihours\n elif (delta*3600.0) % (15.0*60) == 0.0: # Only hours and minutes\n s = r\"$%d^h%.2d^m$\" % (Ihours, Imin)\n else:\n s = s1\n else:\n if prec > 0:\n s = \"%.2dh%.2dm%0*.*fs\" % (Ihours, Imin, prec+3, prec, secleft+Fsec)\n else:\n s = \"%.2dh%.2dm%2ds\" % (Ihours, Imin, secleft)\n return s\n\n\n\ndef lat2dms(a, prec=1, delta=None, tex=False):\n#----------------------------------------------------------------------\n \"\"\"\nConvert an angle in degrees into the **degrees, minutes, seconds** \nformat assuming it was a latitude. Its value should be in\nthe range -90 to 90 degrees\n\n\n:param a:\n Angle (in degrees) for which we want to create a formatted text label.\n:type a:\n Floating point number\n:param prec:\n The required number of decimals in the seconds part of output.\n If a value is omitted, then the default is 1.\n:type prec:\n Integer\n:param delta:\n If one labels world coordinates along an axis then the default labels\n are in degrees, minutes and seconds with some decimal number. This is probably\n not want you want if the step size between subsequent positions is\n for example an integer number of degrees or minutes.\n Then you want labels showing only degrees or degrees and minutes.\n This function tries to find out whether this is the case (given a value\n for *delta*) or not. If so, a minimum length label is returned.\n:type delta:\n *None* or a floating point number\n:param tex:\n The default is *False*. If set to *True*, the string is formatted\n in LaTeX. Such labels can be plotted in, for example, Matplotlib.\n:type tex:\n Boolean\n\n:Returns:\n Formatted string representing the input angle or a string\n with '#' characters indicating that the input was out of range.\n\n:Notes:\n The HMS and DMS format \n should be treated differently because their ranges in world\n coordinates are different.\n Longitudes should be in range of (0,360)\n degrees. So -10 deg is in fact 350 deg. and 370 deg is in\n fact 10 deg. Latitudes range from -90 to 90 degrees. Then 91\n degrees is in fact 89 degrees but at a longitude that is\n separated 180 deg. from the stated longitude. But we don't\n have control over the longitudes here so the only thing we\n can do is reject the value and return a dummy string.\n\n \"\"\"\n#----------------------------------------------------------------------\n\n if a > 90.0 or a < -90.0:\n return \"##d##m##s\";\n sign = 1;\n si = ' ' # one space\n if a < 0.0:\n sign = -1\n si = '-'\n degs = sign * a # Make positive\n if prec < 0:\n prec = 0\n # How many seconds is this. 
Round to 'prec'\n sec = n.round(degs*3600.0, prec)\n Isec = n.int(sec) # Integer seconds\n Fsec = sec - Isec # Fractional remainder\n degs = Isec / 3600.0\n Ideg = n.int(degs)\n secleft = Isec - Ideg*3600.0\n Imin = int(secleft / 60.0)\n secleft = secleft - Imin*60.0\n if tex:\n if prec > 0:\n dms = r\"%c%d^{\\circ}%.2d^{\\prime}%.2d^{\\prime\\prime}\" % (si, Ideg, Imin, secleft)\n fsec = \".%*.*d\" % (prec, prec, int(round(Fsec*10.0**prec,0)))\n s = r\"$\" + dms + fsec + \"$\"\n else:\n s1 = r\"$%c%d^{\\circ}%.2d^{\\prime}%.2d^{\\prime\\prime}$\" % (si, Ideg, Imin, secleft)\n if delta == None:\n s = s1\n else:\n if (delta*3600.0) % 3600 == 0.0: # Only degrees\n s = r\"$%c%d^{\\circ}$\" % (si,Ideg)\n elif (delta*3600.0) % 60 == 0.0: # Only degrees and minutes\n s = r\"$%c%d^{\\circ}%.2d^{\\prime}$\" % (si, Ideg, Imin)\n else:\n s = s1\n else:\n if prec > 0:\n s = \"%c%.2dd%.2dm%0*.*fs\" % (si, Ideg, Imin, prec+3, prec, secleft+Fsec)\n else:\n s = \"%c%.2dd%.2dm%2ds\" % (si, Ideg, Imin, secleft)\n return s\n\n\n\ndef lon2dms(a, prec=1, delta=None, tex=False):\n#----------------------------------------------------------------------\n \"\"\"\nConvert an angle in degrees to **degrees, minutes, seconds** format,\nassuming the input is a longitude but not associated with an equatorial\nsystem.\n\n:param a:\n Angle (in degrees) for which we want to create a formatted text label\n:type a:\n Floating point number\n:param prec:\n The required number of decimals in the seconds part of output\n If a value is omitted, then the default is 1.\n:type prec:\n Integer\n:param delta:\n If one labels world coordinates along an axis then the default labels\n are in hours, minutes and seconds with some decimal number. This is probably\n not want you want if the step size between subsequent positions is\n for example an integer number of degrees or minutes.\n Then you want labels showing only degrees or degrees and minutes.\n This function tries to find out whether this is the case (given a value\n for *delta*) or not. If so, a minimum length label is returned.\n:type delta:\n *None* or a floating point number\n:param tex:\n The default is *False*. If set to *True*, the string is formatted\n in LaTeX. Such labels can be plotted in, for example, Matplotlib.\n:type tex:\n Boolean\n\n:Returns:\n Formatted string representing the input angle.\n \n:Notes:\n Longitudes are forced into the range 0, 360 deg. and then\n converted to hours, minutes and seconds.\n\n:Examples:\n Format a longitude to dms:\n\n >>> print celestial.lon2dms(167.342, 4)\n 167d 20m 31.2000s\n >>> print celestial.lon2dms(-10, 4)\n 350d 0m 0.0000s\n\n \"\"\"\n#----------------------------------------------------------------------\n degs = n.fmod(a, 360.0) # Now in range -360, 360\n if (a < 0.0):\n degs += 360.0 # In range 0, 360 circle-wise\n if prec < 0:\n prec = 0\n # How many seconds is this. 
Round to 'prec'\n sec = n.round(degs*3600.0, prec)\n Isec = n.int(sec) # Integer seconds\n Fsec = sec - Isec # Fractional remainder\n degs = Isec / 3600.0\n Ideg = n.int(degs)\n secleft = Isec - Ideg*3600.0\n Imin = int(secleft / 60.0)\n secleft = secleft - Imin*60.0\n if tex:\n if prec > 0:\n dms = r\"%d^{\\circ}%.2d^{\\prime}%.2d^{\\prime\\prime}\" % (Ideg, Imin, secleft) \n fsec = \".%*.*d\" % (prec, prec, int(round(Fsec*10.0**prec,0)))\n s = r\"$\" + dms + fsec + \"$\"\n else:\n s1 = r\"$%d^{\\circ}%.2d^{\\prime}%.2d^{\\prime\\prime}$\" % (Ideg, Imin, secleft)\n if delta == None:\n s = s1\n else:\n if (delta*3600.0) % 3600 == 0.0: # Only degrees\n s = r\"$%d^{\\circ}$\" % Ideg\n elif (delta*3600.0) % 60 == 0.0: # Only degrees and minutes\n s = r\"$%d^{\\circ}%.2d^{\\prime}$\" % (Ideg, Imin)\n else:\n s = s1\n else:\n if prec > 0:\n s = \"%4dd%2dm%0*.*fs\" % (Ideg, Imin, prec+3, prec, secleft+Fsec)\n else:\n s = \"%4dd%2dm%2ds\" % (Ideg, Imin, secleft)\n return s\n\n\n\ndef JD2epochBessel(JD):\n#----------------------------------------------------------------------\n \"\"\"\nConvert a Julian date to a Besselian epoch.\n\n:param JD:\n Julian date (e.g. 2445700.5)\n:type JD:\n Floating point number\n \n:Returns:\n Besselian epoch (e.g. 1983.9)\n\n:Reference:\n Standards Of Fundamental Astronomy,\n \n http://www.iau-sofa.rl.ac.uk/2003_0429/sofa/epb.html\n\n:Notes:\n e.g. 2445700.5 -> 1983.99956681\n\n One *Tropical Year* is 365.242198781 days and\n JD(1900) = 2415020.31352\n \n If we know the JD then the Besselian epoch can be\n calculated with:\n \n ``BE = B[1900 + (JD - 2415020.31352)/365.242198781]``\n\n Expression corresponds to the IAU SOFA expression in the reference\n with:\n ``2451545-36524.68648 = 2415020.31352``\n\n \"\"\"\n#---------------------------------------------------------------------- \n return 1900.0 + (JD-2415020.31352)/365.242198781\n\n\n\ndef epochBessel2JD(Bepoch):\n#----------------------------------------------------------------------\n \"\"\"\nConvert a Besselian epoch to a Julian date\n\n:param Bepoch:\n Besselian epoch in format nnnn.nn\n:type Bepoch:\n Floating point number\n \n:Returns:\n Julian date\n\n:Reference:\n See: :func:`JD2epochBessel`\n \n:Notes:\n e.g. 1983.99956681 converts into 2445700.5\n It's the inverse of :func:`JD2epochBessel`\n\n \"\"\"\n#----------------------------------------------------------------------\n return (Bepoch-1900.0)*365.242198781 + 2415020.31352\n\n\n\ndef JD2epochJulian(JD):\n#---------------------------------------------------------------------\n \"\"\"\nConvert a Julian date to a Julian epoch\n\n:param JD:\n Julian date\n:type JD:\n Floating point number\n \n:Returns:\n Julian epoch\n\n:Reference:\n Standards Of Fundamental Astronomy,\n \n http://www.iau-sofa.rl.ac.uk/2003_0429/sofa/epj.html\n\n:Notes:\n e.g. ``2445700.5 converts into 1983.99863107``\n Assuming years of exactly 365.25 days, we can\n calculate a Julian epoch from a Julian date.\n Expression corresponds to IAU SOFA routine 'epj'\n\n \"\"\"\n#----------------------------------------------------------------------\n return 2000.0 + (JD - 2451545.0)/365.25\n\n\n\ndef epochJulian2JD(Jepoch):\n#----------------------------------------------------------------------\n \"\"\"\nConvert a Julian epoch to a Julian date\n\n:param Jepoch:\n Julian epoch (in format nnnn.nn)\n:type Jepoch:\n Floating point number\n \n:Returns:\n Julian date\n\n:Reference:\n See :func:`JD2epochJulian`\n \n:Notes:\n e.g. 
``1983.99863107 converts into 2445700.5``\n It's the inverse of function JD2epochJulian\n\n \"\"\"\n#----------------------------------------------------------------------\n return (Jepoch-2000.0)*365.25 + 2451545.0\n\n\n\ndef obliquity1980(jd):\n#----------------------------------------------------------------------\n \"\"\"\nWhat is the obliquity of the ecliptic at this Julian date? (IAU 1980 model)\n\n:param jd:\n Julian date\n:type jd:\n Floating point number\n \n:Returns:\n Mean obliquity in degrees\n\n:Reference:\n Explanatory Supplement to the Astronomical Almanac,\n P. Kenneth Seidelmann (ed), University Science Books (1992),\n Expression 3.222-1 (p114).\n\n:Notes:\n The epoch is entered in Julian date and the time is calculated \n w.r.t. J2000.\n \n The obliquity is the angle between the mean equator and\n ecliptic, or, between the ecliptic pole and mean celestial\n pole of date\n\n \"\"\"\n#----------------------------------------------------------------------\n # T = (Date - 1 jan, 2000, 12h noon)\n T = (jd-2451545.0)/36525.0\n eps = (84381.448+(-46.8150+(-0.00059+0.001813*T)*T)*T) / 3600.0\n return eps\n\n\n\ndef obliquity2000(jd):\n#----------------------------------------------------------------------\n \"\"\"\nWhat is the obliquity of the ecliptic at this Julian date?\n(IAU model 2000)\n\n:param jd:\n Julian date\n:type jd:\n Floating point number\n \n:Returns:\n Mean obliquity in degrees\n\n:Reference:\n Fukushima, T. 2003, AJ, 126,1\n Kaplan, H., 2005, The IAU Resolutions\n on Astronomical Reference Systems,\n Time Scales, and Earth Rotation Models,\n United States Naval Observatory circular no. 179,\n http://aa.usno.navy.mil/publications/docs/Circular_179.pdf\n (page 44)\n \n:Notes:\n The epoch is entered in Julian date and the time is calculated\n w.r.t. J2000.\n \n The obliquity is the angle between the mean equator and\n ecliptic, or, between the ecliptic pole and mean celestial\n pole of date.\n \n \"\"\"\n#----------------------------------------------------------------------\n # T = (Date - 1 jan, 2000, 12h noon)\n T = (jd-2451545.0)/36525.0\n\n eps = (84381.406 +\n ( -46.836769 +\n ( -0.0001831 +\n ( 0.00200340 +\n ( -0.000000576 +\n ( -0.0000000434 )*T)*T)*T)*T)*T) / 3600.0\n return eps\n\n\n\ndef IAU2006precangles(epoch):\n#----------------------------------------------------------------------\n \"\"\"\nCalculate IAU 2000 precession angles for precession from\ninput epoch to J2000.\n\n:param epoch:\n Julian epoch of observation.\n:type epoch:\n Floating point number\n \n:Returns:\n Angles \\u03B6 (zeta), z, \\u03B8 (theta) in degrees to setup a rotation matrix\n to transform from J2000 to input epoch.\n \n:Reference:\n Capitaine N. 
et al., IAU 2000 precession A&A 412, 567-586 (2003)\n\n:Notes:\n Input are Julian epochs!\n ``T = (jd-2451545.0)/36525.0``\n Combined with ``jd = Jepoch-2000.0)*365.25 + 2451545.0`` gives:\n (see module code at function *epochJulian2JD(epoch)*)\n ``T = (epoch-2000.0)/100.0``\n\n This function should be updated as soon as there are IAU2006 adopted\n angles to replace the angles used in this function.\n\n \"\"\"\n#----------------------------------------------------------------------\n # T = (Current epoch - 1 jan, 2000, 12h noon)\n T = (epoch-2000.0)/100.0\n d0 = 2.5976176\n d1 = 2306.0809506\n d2 = 0.3019015\n d3 = 0.0179663\n d4 = -0.0000327\n d5 = -0.0000002\n zeta_a = T*(d1+T*(d2+T*(d3+T*(d4+T*(d5)))))+d0\n d0 = -2.5976176\n d1 = 2306.0803226\n d2 = 1.0947790\n d3 = 0.0182273\n d4 = 0.0000470\n d5 = -0.0000003\n z_a = T*(d1+T*(d2+T*(d3+T*(d4+T*(d5)))))+d0\n d0 = 0.0\n d1 = 2004.1917476\n d2 = -0.4269353\n d3 = -0.0418251\n d4 = -0.0000601\n d5 = -0.0000001\n theta_a = T*(d1+T*(d2+T*(d3+T*(d4+T*(d5)))))+d0\n # Return values in degrees\n return zeta_a/3600.0, z_a/3600.0, theta_a/3600.0\n\n\n\ndef Lieskeprecangles(jd1, jd2):\n#----------------------------------------------------------------------\n \"\"\"\nCalculate IAU 1976 precession angles for a precession\nof epoch corresponding to Julian date jd1 to epoch corresponds\nto Julian date jd2.\n\n:param jd1:\n Julian date for start epoch\n:type jd1:\n Floating point number \n:param jd2:\n Julian date for end epoch\n:type jd2:\n Floating point number \n \n:Returns:\n Angles \\u03B6 (zeta), z, \\u03B8 (theta) degrees\n\n:Reference:\n Lieske,J.H., 1979. Astron.Astrophys.,73,282.\n equations (6) & (7), p283.\n\n:Notes:\n The ES (Explanatory Supplement to the Astronomical Almanac)\n lists for a IAU1976 precession from 1984, January 1d0h to J2000\n the angles in **arcsec**: ``xi_a=368.9985, ze_a=369.0188 and th_a=320.7279``\n Using the functions in this module, this can be calculated\n by applying:\n\n >>> jd1 = celestial.JD(1984,1,1)\n >>> jd2 = celestial.JD(2000,1,1.5)\n >>> print celestial.Lieskeprecangles(jd1, jd2)\n (0.10249958598931658, 0.10250522534285664, 0.089091092843880629)\n >>> print [a*3600 for a in angles]\n [368.99850956153966, 369.01881123428387, 320.72793423797026]\n\n The function returns values in degrees, while literature values\n often are listed in seconds of arc.\n\n\n Lieske's fit belongs to the so called Quasi-Linear Types\n Below a table with the precision (according to IAU SOFA):\n\n * 1960AD to 2040AD: < 0.1\"\n * 1640AD to 2360AD: < 1\"\n * 500BC to 3000AD: < 3\"\n * 1200BC to 3900AD: > 10\"\n * < 4200BC or > 5600AD: > 100\"\n * < 6800BC or > 8200AD: > 1000\"\n \n \"\"\"\n#----------------------------------------------------------------------\n # T = (Current epoch - 1 jan, 2000, 12h noon)\n T = (jd1-2451545.0)/36525.0\n t = (jd2-jd1)/36525.0\n\n d1 = 2306.2181\n d2 = 1.39656\n d3 = -0.000139\n d4 = 0.30188\n d5 = 0.000344\n d6 = 0.017998\n D1 = d1 + T*(d2+T*d3)\n zeta_a = t*(D1 + t*((d4+d5*T) + t*d6))\n # d1 = 2306.2181\n # d2 = 1.39656\n # d3 = -0.000139\n d4 = 1.09468\n d5 = -0.000066\n d6 = 0.018203\n z_a = t*(D1 + t*((d4+d5*T) + t*d6))\n d1 = 2004.3109\n d2 = -0.85330\n d3 = -0.000217\n d4 = -0.42665\n d5 = -0.000217\n d6 = -0.041833\n D1 = d1 + T*(d2+T*d3)\n theta_a = t*(D1 + t*((d4+d5*T) + t*d6))\n # Return values in degrees\n return zeta_a/3600.0, z_a/3600.0, theta_a/3600.0\n\n\n\ndef Newcombprecangles(epoch1, epoch2):\n#----------------------------------------------------------------------\n 
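# Illustrative sketch, for reference only: the Lieske angles above are what\n    # JMatrixEpoch12Epoch2() feeds into precessionmatrix() further down, e.g.\n    #   >>> zeta, z, theta = Lieskeprecangles(JD(1984,1,1), JD(2000,1,1.5))\n    #   >>> M = rotZ(-z)*rotY(theta)*rotZ(-zeta)   # same composition as precessionmatrix(zeta, z, theta)\n    # Here JD(), rotZ() and rotY() are the helper functions defined in this module;\n    # all angles are in degrees.\n    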
\"\"\"\nCalculate precession angles for a precession in FK4, using\nNewcomb's method (Woolard and Clemence angles)\n\n:param epoch1:\n Besselian start epoch\n:type epoch1:\n Floating point number\n:param epoch2:\n Besselian end epoch\n:type epoch2:\n Floating point number\n\n\n:Returns:\n Angles \\u03B6 (zeta), z, \\u03B8 (theta) degrees\n \n:Reference:\n ES 3.214 p.106\n \n:Notes:\n Newcomb's precession angles for old catalogs (FK4),\n see ES 3.214 p.106.\n Input are **Besselian epochs**!\n Adopted accumulated precession angles from equator\n and equinox at B1950 to 1984 January 1d 0h according\n to ES (table 3.214.1, p 107) are:\n ``zeta=783.7092, z=783.8009 and theta=681.3883``\n The Woolard and Clemence angles (derived in this routine)\n are:\n ``zeta=783.70925, z=783.80093 and theta=681.38830``\n (see same ES table as above).\n \n This routine found (in seconds of arc):\n ``zeta,z,theta = 783.709246271 783.800934641 681.388298284``\n for ``t1 = 0.1`` and ``t2 = 0.133999566814``\n using the lines in the next example.\n\n:Examples: From an interactive Python session:\n \n >>> b1 = 1950.0\n >>> b2 = celestial.epochs(\"F1984-01-01\")[0]\n >>> print [x*3600 for x in celestial.Newcombprecangles(be1, be2)]\n [783.70924627097793, 783.80093464073127, 681.38829828393466]\n\n \"\"\"\n#----------------------------------------------------------------------\n t1 = (epoch1-1850.0)/1000.0 #1000 tropical years\n t2 = (epoch2-1850.0)/1000.0\n tau = t2 - t1\n\n d0 = 23035.545; d1 = 139.720; d2 = 0.060; d3 = 30.240; d4 = -0.27; d5 = 17.995\n a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5\n zeta_a = tau*(a0+tau*(a1+tau*a2))\n\n d0 = 23035.545; d1 = 139.720; d2 = 0.060; d3 = 109.480; d4 = 0.39; d5 = 18.325\n a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5\n z_a = tau*(a0+tau*(a1+tau*a2))\n\n d0 = 20051.12; d1 = -85.29; d2 = -0.37; d3 = -42.65; d4 = -0.37; d5 = -41.80\n a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5\n theta_a = tau*(a0+tau*(a1+tau*a2))\n # Return values in degrees\n return zeta_a/3600.0, z_a/3600.0, theta_a/3600.0\n\n\n\ndef rotX(angle):\n \"\"\"\n-----------------------------------------------------------------------\nPurpose: Calculate the matrix that represents a 3d rotation\n around the X axis.\nInput: Rotation angle in degrees\nReturns: A 3x3 matrix representing the rotation about angle around \n X axis. \nReference: Diebel, J. 2006, Stanford University, Representing Attitude:\n Euler angles, Unit Quaternions and Rotation Vectors.\n http://ai.stanford.edu/~diebel/attitude.html\n\nNotes: Return the rotation matrix for a rotation around the X axis.\n This is a rotation in the YZ plane. Note that we construct\n a new vector with: xnew = R1.x\n In the literature, this rotation is usually called R1\n-----------------------------------------------------------------------\n \"\"\"\n a = d2r(angle)\n v = n.asmatrix(n.zeros((3,3), 'd'))\n cosa = n.cos(a)\n sina = n.sin(a)\n v[0,0] = 1.0; v[0,1] = 0.0; v[0,2] = 0.0;\n v[1,0] = 0.0; v[1,1] = cosa; v[1,2] = sina;\n v[2,0] = 0.0; v[2,1] = -sina; v[2,2] = cosa;\n return v\n\n\n\ndef rotY(angle):\n \"\"\"\n-----------------------------------------------------------------------\nDocumentation in 'rotX'\nReturn rot. mat. for rot. 
around Y axis\n-----------------------------------------------------------------------\n \"\"\"\n a = d2r(angle)\n v = n.asmatrix(n.zeros((3,3), 'd'))\n cosa = n.cos(a)\n sina = n.sin(a)\n v[0,0] = cosa; v[0,1] = 0.0; v[0,2] = -sina;\n v[1,0] = 0.0; v[1,1] = 1.0; v[1,2] = 0.0;\n v[2,0] = sina; v[2,1] = 0.0; v[2,2] = cosa;\n return v\n\n\n\ndef rotZ(angle):\n \"\"\"\n-----------------------------------------------------------------------\nDocumentation in 'rotX'\nReturn rot. mat. for rot. around Z axis\n-----------------------------------------------------------------------\n \"\"\"\n a = d2r(angle)\n v = n.asmatrix(n.zeros((3,3), 'd'))\n cosa = n.cos(a)\n sina = n.sin(a)\n v[0,0] = cosa; v[0,1] = sina; v[0,2] = 0.0;\n v[1,0] = -sina; v[1,1] = cosa; v[1,2] = 0.0;\n v[2,0] = 0.0; v[2,1] = 0.0; v[2,2] = 1.0;\n return v\n\n\n\ndef fitsdate(date):\n \"\"\"\n-----------------------------------------------------------------------\nPurpose: Given a string from a FITS file, try to parse it and \n convert the string into three parts: an integer year, an\n integer month and a fractional day.\nInput: A string, representing a date in FITS format\nReturns: Integer year, integer month, fractional day.\nReference: -\nNotes: Process the FITS dates as part of the 'epochs' function. \n It processes the following formats:\n DD/MM/YY or DD/MM/19YY\n YYYY-MM-DD\n YYYY-MM-DDTHH:MM:SS\n-----------------------------------------------------------------------\n \"\"\"\n parts = date.split('/')\n if len(parts)==3:\n return ((int(parts[2])%1900)+1900, int(parts[1]), float(parts[0]))\n\n parts = date.split('T')\n if len(parts)==2:\n date = parts[0]\n parts = parts[1].split(':')\n facts = (3600.0, 60.0, 1.0)\n time = 0.0\n for i in range(len(parts)):\n time += float(parts[i])*facts[i] \n else:\n time = 0.0\n parts = date.split('-')\n return (int(parts[0]), int(parts[1]), float(parts[2])+time/86400.0)\n\n\n\ndef epochs(spec):\n#-----------------------------------------------------------------------\n \"\"\"\nFlexible epoch parser. The functions in this module have different\ninput parameters (Julian epoch, Besselian epochs, Julian dates) because\nthe algorithms came from different sources. 
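(For the FITS formats the actual splitting is done by :func:`fitsdate` above; as an\nillustration only, ``fitsdate('2008-03-31T8:09')`` yields the tuple ``(year, month,\nfractional day)``, here roughly ``(2008, 3, 31.3396)``, which is then converted to the\nJulian date shown in the examples below.)\n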
What we needed was a routine\nthat could convert a string which represents a date in various formats,\nto values for a Julian epoch, Besselian epochs and a Julian date.\nThis function returns these value for any valid input date.\n\nFor the epoch syntax read the documentation at :ref:`celestial-epochs`.\nNote that an epoch of observation is either a second epoch in the string\n(the first is always the equinox) or the epoch string has\na suffix '_' which may be follwed by arbitrary characters.\n\n:param spec:\n An epoch specification (see below)\n:type spec:\n String\n \n:Returns:\n Calculated corresponding **Besselian epoch**, **Julian epoch** and **Julian date**.\n Return in order: *B, J, JD*\n \n:Reference:\n Various sources listing Julian dates.\n \n:Notes:\n\n:Examples: Some checks:\n\n >>> celestial.epochs('F2008-03-31T8:09') # should return:\n (2008.2474210134737, 2008.2459673739454, 2454556.8395833336)\n >>> celestial.epochs('F2007-01-14T13:18:59.9')\n (2007.0378545262108, 2007.0364267212976, 2454115.0548599539)\n >>> celestial.epochs(\"j2007.0364267212976\")\n (2007.0378545262108, 2007.0364267212976, 2454115.0548599539)\n >>> celestial.epochs(\"b2007.0378545262108\")\n (2007.0378545262108, 2007.0364267212976, 2454115.0548599539)\n \n \"\"\"\n#-----------------------------------------------------------------------\n\n if not spec:\n mes = \"No epoch in string\"\n raise Exception(mes)\n\n b = j = jd = None\n\n i = spec.find('_')\n if i != -1:\n spec = spec[:i]\n\n parts = re_split(r'(\\d.*)', spec, 1)\n\n try:\n prefix = (parts[0].strip().upper())\n if prefix == 'B' or prefix == '-B':\n b = float(parts[1])\n if prefix == '-B':\n b *= -1.0\n jd = epochBessel2JD(b)\n j = JD2epochJulian(jd)\n elif prefix == 'J' or prefix == '-J':\n j = float(parts[1])\n if prefix == '-J':\n j *= -1.0\n jd = epochJulian2JD(j)\n b = JD2epochBessel(jd)\n elif prefix == 'JD':\n jd = float(parts[1])\n b = JD2epochBessel(jd)\n j = JD2epochJulian(jd)\n elif prefix == 'MJD':\n mjd = float(parts[1])\n # MJD = JD - 2400000.5\n jd = mjd + 2400000.5\n b = JD2epochBessel(jd)\n j = JD2epochJulian(jd)\n elif prefix == 'RJD':\n rjd = float(parts[1])\n # RJD = JD - 2400000\n jd = rjd + 2400000\n b = JD2epochBessel(jd)\n j = JD2epochJulian(jd)\n elif prefix == 'F':\n epoch = parts[1];\n fd = fitsdate(parts[1])\n jd = JD(fd[0], fd[1], fd[2])\n b = JD2epochBessel(jd)\n j = JD2epochJulian(jd)\n else:\n raise Exception(\"Unknown prefix for epoch\")\n except:\n mes = \"No prefix or cannot convert epoch to a number\"\n raise Exception(mes)\n\n return (b, j, jd)\n\n\n\ndef MatrixEqJ20002Gal():\n#-----------------------------------------------------------------------\n \"\"\"\nPurpose: (Experimental) Return the rotation matrix for a transformation\n between equatorial (FK5, J2000) and galactic IAU 1958 \n\t coordinate systems. This function is not used because it could\n be composed of two fundamental transformations.\nInput: -\nReturns: Matrix M as in: XYZgal = M * XYZj2000\nReference:-Murray, C.A. The Transformation of coordinates between the \n systems B1950.0 and J2000.0, and the principal galactic axes\n\t referred to J2000.0, \n Astronomy and Astrophysics (ISSN 0004-6361), vol. 218, no. 1-2, \n\t July 1989, p. 325-329.\n -Blaauw, A., Gum C.S., Pawsey, J.L., Westerhout, G.: 1958, \n\t Monthly Notices Roy. Astron. Soc. 
121, 123\nNotes: The position of the galactic pole is defined in the fk4, B1950\n system (without e-terms).\n For a position in fk5 J2000 one could consider to create a\n rotation matrix based on J2000 coordinates of the galactic pole.\n\n 192.85948121 -RA of galactic north pole (mean b1950.0)\n 27.12825118 -Dec of galactic north pole\n 122.93191857 -Galactic longitude of celestial equator\n\n >>> print celestial.sky2sky(celestial.fk4_no_e, celestial.fk5,192.25,27.4)\n [[ 192.85948121 27.12825118]]\n >>> print celestial.sky2sky( celestial.fk5, celestial.gal, 0,90)\n [[ 122.93191857 27.12825118]]\n\n According to the Hipparcos explanatory supplement the angles\n in J2000 are:\n 192.85948 Right Ascension of Galactic North Pole\n 27.12825 Declination of Galactic North Pole\n 32.93192 Galactic longitude of celestial equator \n\n HOWEVER:\n Murray (1989) however objects against the transformation of \n these principal directions because in the J2000 system the \n axes are not orthogonal, which is unacceptable for a transformation.\n Therefore the\n transformation from fk5 to galactic is calculated in two steps. \n First a position is transformed to fk4 (no e-terms) and then \n to a galactic coordinate (lII, bII) \n The result matrix in celestial.py is calculated with: \n skymatrix((eq,\"J2000.0\",fk5),gal)\n and produces the numbers:\n [[-0.054875539396 -0.873437104728 -0.48383499177 ]\n [ 0.494109453628 -0.444829594298 0.7469822487 ]\n [-0.867666135683 -0.198076389613 0.455983794521]]\n which are all consistent with equation (33) in Murray, 1989.\n\n If, on the other hand we calculate the rotation matrix for the J2000\n coordinates:\n >>> R = rotZ(180-122.93191857)*rotY(90-27.12825118)*rotZ(192.85948121)\n >>> print skymatrix((eq,\"J2000.0\",fk5),gal)[0] - R\n [[ -4.26766400e-11 -1.39604994e-11 3.00424130e-11]\n [ -9.72683045e-12 4.29156710e-12 8.98969787e-12]\n [ -2.84006152e-12 5.19224108e-11 1.71504477e-11]]\n\n then we cannot conclude that these different methods differ\n significantly.\n\n In the 2MASS All-Sky Data Release Explanatory Supplement:\n we read:\n 'There is an ambiguity in the appropriate way to convert J2000\n ICRS coordinates to the galactic system. Galactic coordinates\n could be derived by precessing J2000.0 coordinates to B1950, \n then using the rotation transformations into the lII,bII \n system (as in MatrixEqB19502Gal(), VOG). \n This transformation method produces galactic coordinates\n that can differ \n by up to 0.4'' from those, e.g., produced using the direct\n J2000-to-galactic transformations, proposed by Murray \n (1989, AsAp, 218, 325).'\n\n Murray's matrix however is composed of the transformation fk5\n to fk4 without e-terms and fk4 without e-terms to Galactic. \n So the differences can only be explained by \n wrongly adding e-terms in fk4 before transforming these to\n galactic coordinates.\n \"\"\"\n#-----------------------------------------------------------------------\n M1 = FK52FK4Matrix()\n M2 = MatrixEqB19502Gal()\n return M2*M1\n\n\n\ndef MatrixEqB19502Gal():\n#-----------------------------------------------------------------------\n \"\"\"\nCreate matrix to convert equatorial fk4 coordinates\n(without e-terms) to IAU 1958 lII,bII system of\ngalactic coordinates.\n\n:Parameters:\n None\n \n:Results:\n 3x3 Matrix M as in XYZgal = M * XYZb1950\n\n:Reference:\n \n 1. Blaauw, A., Gum C.S., Pawsey, J.L., Westerhout, G.: 1958,\n 2. Monthly Notices Roy. Astron. Soc. 121, 123,\n 3. Blaauw, A., 2007. 
Private communications.\n\n:Notes:\n Original definitions from 1.:\n \n * The new north galactic pole lies in the direction\n alpha = 12h49m (192.25 deg),\n delta=27.4 deg (equinox 1950.0).\n * The new zero of longitude is the great semicircle\n originating at the new north galactic pole at the\n position angle theta = 123 deg with respect\n to the equatorial pole for 1950.0.\n * Longitude increases from 0 to 360 deg. The sense is\n such that, on the galactic equator increasing galactic\n longitude corresponds to increasing Right Ascension.\n Latitude increases from -90 deg through 0 deg to 90 deg\n at the new galactic pole.\n\n Given the RA and Dec of the galactic pole, and using the\n Euler angles scheme::\n\n M = rotZ(a3).rotY(a2).rotZ(a1)\n\n We first rotate the spin vector of the XY plane about\n an angle a1 = ra_pole and then rotate the spin vector\n in the XZ plane (i.e. around the Y axis) with an angle\n a2=90-dec_pole to point it in the right declination.\n\n Now think of a circle with the galactic pole as its center.\n The radius is equal to the distance between this center\n and the equatorial pole. The zero point now is on the circle\n and opposite to this pole.\n \n We need to rotate along this circle (i.e. a rotation\n around the new Z-axis) in a way that the angle between the\n zero point and the equatorial pole is equal to 123 deg.\n So first we need to compensate for the 180 deg of the\n current zero longitude, opposite to the pole. Then we need\n to rotate about an angle 123 deg but in a way that increasing\n galactic longitude corresponds to increasing Right Ascension\n which is opposite to the standard rotation of this circle\n (note that we rotated the original X axis about 192.25 deg).\n The last rotation angle therefore is a3=+180-123::\n\n M = rotZ(180-123.0)*rotY(90-27.4)*rotZ(192.25)\n\n The composed rotation matrix is the same as in Slalib's 'ge50.f'\n and the matrix in eq. (32) of Murray (1989).\n \"\"\"\n#-----------------------------------------------------------------------\n\n return rotZ(180-123.0)*rotY(90-27.4)*rotZ(192.25)\n # Alternative: rotZ(-33.0)*rotX(62.6)*rotZ(90+192.25)\n\n\n\n\ndef MatrixGal2Sgal():\n#-----------------------------------------------------------------------\n \"\"\"\nTransform galactic to supergalactic coordinates\n\n:Parameters: \n None\n \n:Returns: \n Matrix M as in XYZsgal = M * XYZgal\n\n:Reference: \n Lahav, O., The supergalactic plane revisited with the \n Optical Redshift Survey\n Mon. Not. R. Astron. Soc. 312, 166-176 (2000)\n\n:Notes: \n The Supergalactic equator is conceptually defined by the \n plane of the local (Virgo-Hydra-Centaurus) supercluster,\n and the origin of supergalactic longitude is at the\n intersection of the supergalactic and galactic planes. \n (de Vaucouleurs) \n \n North SG pole at l=47.37 deg, b=6.32 deg. \n Node at l=137.37, sgl=0 (inclination 83.68 deg).\n\n Older references give for he position of the SG node 137.29\n which differs from 137.37 deg in the official definition.\n\n For the rotation matrix we chose the scheme *Rz.Ry.Rz*\n Then first we rotate about 47.37 degrees along the Z-axis\n followed by a rotation about 90-6.32 degrees is needed to\n set the pole to the right declination.\n The new plane intersects the old one at two positions.\n One of them is l=137.37, b=0 (in galactic coordinates).\n If we want this to be sgl=0 we have to rotate this plane along\n the new Z-axis about an angle of 90 degrees. 
So the composed\n rotation matrix is::\n \n M = Rotz(90)*Roty(90-6.32)*Rotz(47.37)\n \"\"\"\n#----------------------------------------------------------------------\n # Alternative rotX(90-6.32)*rotZ(90+47.37)\n return rotZ(90.0)*rotY(90-6.32)*rotZ(47.37)\n\n\n\n\ndef MatrixEq2Ecl(epoch, S1):\n#----------------------------------------------------------------------\n \"\"\"\nCalculate a rotation matrix to convert equatorial \ncoordinates to ecliptical coordinates\n\n:param epoch:\n Epoch of the equator and equinox of date \n:type epoch: Floating point number\n \n:param S1:\n equatorial system to determine if one entered epoch in\n B or J coordinates.\n:type S1:\n Integer\n\n:Returns: \n 3x3 Matrix M as in XYZecl = M * XYZeq\n\n:Reference: \n Representations of celestial coordinates in FITS, \n Calabretta. M.R., & Greisen, E.W., (2002)\n Astronomy & Astrophysics, 395, 1077-1122.\n http://www.atnf.csiro.au/people/mcalabre/WCS/ccs.pdf\n\n:Notes: \n 1. The origin for ecliptic longitude is the vernal equinox.\n Therefore the coordinates of a fixed object is subject to \n shifts due to precession. The rotation matrix \n uses the obliquity to do the conversion to the wanted ecliptic \n coordinates.\n So we always need to enter an epoch. Usually this is J2000,\n but it can also be the epoch of date. The additional reference\n system indicates whether we need a Besselian or a Julian \n epoch.\n\n 2. In the FITS paper of Calabretta and Greisen (2002), one \n observes the following relations to FITS:\n \n -Keyword RADESYSa sets the catalog system FK4, FK4-NO-E or FK5\n This applies to equatorial and ecliptical coordinates with \n the exception of FK4-NO-E.\n \n -FK4 coordinates are not strictly spherical since they include \n a contribution from the elliptic terms of aberration, the \n so-called e-terms which amount to max. 343 milliarcsec. \n FITS paper: *'Strictly speaking, therefore, a map obtained from, \n say, a radio synthesis telescope, should be regarded\n as FK4-NO-E unless it has been appropriately re-sampled\n or a distortion correction provided.\n In common usage, however, CRVALia for such maps is usually \n given in FK4 coordinates. In doing so, the e-terms are effectively\n corrected to first order only.'*. (See also ES, eq. 3.531-1 page 170.\n \n -Keyword EQUINOX sets the epoch of the mean equator and equinox.\n \n -Keyword EPOCH is often used in older FITS files. It is a deprecated keyword\n and should be replaced by EQUINOX.\n It does not require keyword RADESYS. From its value we derive\n whether the reference system is FK4 or FK5 (the marker value is 1984.0)\n \n -Ecliptic coordinates require the epoch of the equator and equinox\n of date.\n This will be taken as the time of observation rather than\n EQUINOX. \n \n FITS paper: *'The time of observation may also be required for\n other astrometric purposes in addition to the usual astrophysical\n uses, for example, to specify when the mean place was\n correct in accounting for proper motion, including \"fictitious\"\n proper motions in the conversion between the FK4 and FK5 systems.\n The old *DATE-OBS* keyword may be used for this purpose.\n However, to provide a more convenient specification we\n here introduce the new keyword MJD-OBS'.*\n \n So MJD-OBS is the modified Julian Date (JD - 2400000.5) of the\n start of the observation.\n\n 3. 
Equatorial to ecliptic transformations use the time dependent \n obliquity of the equator (also known as the obliquity of the ecliptic).\n Again, start with::\n \n M = rotZ(0).rotX(eps).rotZ(0) = E.rotX(eps).E = rotX(eps)\n \n In fact this is only a rotation around the X axis\n \"\"\"\n#----------------------------------------------------------------------\n if (S1 == fk4):\n jd = epochBessel2JD(epoch)\n else: # For all other systems the epochs are Julian\n jd = epochJulian2JD(epoch)\n if (S1 == icrs or S1 == j2000):\n eps = obliquity2000(jd)\n else:\n eps = obliquity1980(jd)\n return rotX(eps)\n\n\n\n\ndef getEterms(epoch):\n#----------------------------------------------------------------------\n \"\"\"\nCompute the E-terms (elliptic terms of aberration) for a given epoch.\n\n:param epoch:\n A **Besselian** epoch\n:type epoch:\n Floating point number\n \n:Returns: \n A tuple containing the e-terms vector \n *(DeltaD,DeltaC,DeltaC.tan(e0))*\n\n:Reference: \n Seidelman, P.K., 1992. Explanatory Supplement to the Astronomical\n Almanac. University Science Books, Mill Valley\n\n:Notes: \n The method is described on page 170/171 of the ES.\n One needs to process the e-terms for the appropriate\n epoch This routine returns the e-term vector for arbitrary epoch.\n \"\"\"\n#----------------------------------------------------------------------\n # Julian centuries since B1950\n T = (epoch-1950.0)*1.00002135903/100.0\n # Eccentricity of the Earth's orbit\n ec = 0.01673011-(0.00004193+0.000000126*T)*T\n # Mean obliquity of the ecliptic. Method is different compared to \n # functions for the obliquity defined earlier. This function depends\n # on time wrt. epoch 1950 not epoch 2000.\n ob = (84404.836-(46.8495+(0.00319+0.00181*T)*T)*T)\n ob = d2r(ob/3600.0)\n # Mean longitude of perihelion of the solar orbit\n p = (1015489.951+(6190.67+(1.65+0.012*T)*T)*T)\n p = d2r(p/3600.0)\n # Calculate the E-terms vector\n ek = ec*d2r(20.49522/3600.0) # 20.49552 is constant of aberration at J2000\n cp = n.cos(p)\n # -DeltaD DeltaC DeltaC.tan(e0)\n return (ek*n.sin(p), -ek*cp*n.cos(ob), -ek*cp*n.sin(ob))\n\n\n\ndef addEterms(xyz, a=None):\n#----------------------------------------------------------------------\n \"\"\"\nAdd the elliptic component of annual aberration when the\nresult must be a catalogue fk4 position.\n\n:param xyz:\n Cartesian position(s) converted from \n lonlat = [ (a1,d1),(a2,d2), ..., (an,dn) ] -->\n xyz = [ (x1,y1,z1), (x2,y2,z2), ..., (xn,yn,zn) ]\n:type xyz:\n NumPy (n,2) matrix\n:param a:\n E-terms vector (as returned by getEterms())\n If input *a* is omitted (i.e. *a == None*), the e-terms for\n 1950 will be substituted.\n:type a:\n Tuple with 3 floating point numbers\n\n:Result:\n **Apparent place**, NumPy (n,2) matrix \n\n:Reference: \n * Seidelman, P.K., 1992. Explanatory Supplement to the Astronomical\n Almanac. University Science Books, Mill Valley.\n * Yallop et al, Transformation of mean star places,\n AJ, 1989, vol 97, page 274\n * Stumpff, On the relation between Classical and Relativistic\n Theory of Stellar Aberration, \n Astron, Astrophys, 84, 257-259 (1980)\n\n:Notes: \n There is a so called ecliptic component in the stellar aberration.\n This vector depends on the epoch at which we want to process\n these terms. It corresponds to the component of the earth's velocity\n perpendicular to the major axis of the ellipse in the ecliptic.\n The E-term corrections are as follows. A catalog FK4 position\n include corrections for elliptic terms of aberration. 
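(As an indication of the size: for B1950 the vector returned by ``getEterms(1950.0)``\n    is approximately ``(-1.62557e-6, -0.31919e-6, -0.13843e-6)``, i.e. about 343\n    milliarcsec on the sky; see also the comment in :func:`removeEterms` below.)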
\n These positions are apparent places. For precession and/or \n rotations to other sky systems, one processes only mean places.\n So to get a mean place, one has to remove the E-terms vector.\n The ES suggests for the removal to use a decompositions of the\n E-term vector along the unit circle to get the approximate \n new vector, which has almost the correct angle and has almost \n length 1. The advantage is that when we add the E-term vector \n to this new vector, we obtain a new vector with the original \n angle, but with a length unequal to 1, which makes it suitable\n for closure tests.\n However, the procedure can be made more rigorous: \n For the subtraction we subtract the E-term vector from the \n start vector and normalize it afterwards. Then we have an\n exact new angle (opposed to the approximation in the ES).\n The procedure to go from a vector in the mean place system to \n a vector in the system of apparent places is a bit more \n complicated:\n Find a value for lambda so that the current vector is\n adjusted in length so that adding the e-term vector gives a new\n vector with length 1. This is by definition the new vector\n with the right angle. For more information, see the background\n information in :doc:`celestialbackground`.\n \"\"\"\n#----------------------------------------------------------------------\n\n xyzeterm = xyz.copy()\n if a == None:\n a = getEterms(1950.0)\n for i in range(xyz.shape[1]): # Loop over all vectors\n x = xyz[0,i]; y = xyz[1,i]; z = xyz[2,i]\n # Normalize to get a vector of length 1. Our algorithm is based on that fact.\n d = n.sqrt(x*x + y*y + z*z)\n x /= d; y /= d; z /= d\n # Find the lambda to stretch the vector \n w = 2.0 * (a[0]*x + a[1]*y + a[2]*z)\n p = a[0]*a[0] + a[1]*a[1] + a[2]*a[2] - 1.0\n lambda1 = (-w + n.sqrt(w*w-4.0*p))/2.0 # Vector a is small. We want only the positive lambda \n xyzeterm[0,i] = lambda1*x + a[0]\n xyzeterm[1,i] = lambda1*y + a[1] \n xyzeterm[2,i] = lambda1*z + a[2] \n\n return xyzeterm\n\n\n\ndef removeEterms(xyz, a=None):\n#----------------------------------------------------------------------\n \"\"\"\nRemove the elliptic component of annual aberration when this\nis included in a catalogue fk4 position.\n\n:param xyz:\n Cartesian position(s) converted from \n lonlat = [ (a1,d1),(a2,d2), ..., (an,dn) ] -->\n xyz = [ (x1,y1,z1), (x2,y2,z2), ..., (xn,yn,zn) ]\n:type xyz:\n NumPy (n,2) matrix\n:param a:\n E-terms vector (as returned by getEterms())\n If input a is omitted (== *None*), the e-terms for\n 1950 will be substituted.\n:type a:\n Tuple with 3 floating point numbers\n\n:Result: \n **Mean place**, NumPy (n,2) matrix\n\n:Notes:\n Return a new position where the elliptic terms of aberration \n are removed i.e. 
convert a apparent position from a catalog to\n a mean place.\n The effects of ecliptic aberration were included in the \n catalog positions to facilitate telescope pointing.\n See also notes at 'addEterms'.\n\n \"\"\"\n#----------------------------------------------------------------------\n xyzeterm = xyz.copy()\n if a == None:\n a = getEterms(1950.0)\n # a(1950) should be: = n.array([-1.62557e-6, -0.31919e-6, -0.13843e-6])\n for i in range(xyz.shape[1]): # Loop over all vectors data\n x = xyz[0,i]; y = xyz[1,i]; z = xyz[2,i]\n x -= a[0]; y -= a[1]; z -= a[2]\n xyzeterm[0,i] = x\n xyzeterm[1,i] = y\n xyzeterm[2,i] = z\n\n return xyzeterm\n\n\n\ndef precessionmatrix(zeta, z, theta):\n \"\"\"\n---------------------------------------------------------------------- \nPurpose: Given three precession angles, create the corresponding \n rotation matrix\nInput: zeta, z, theta\nReturns: Rotation matrix M as in XYZepoch1 = M * XYZepoch2\nNotes: Return the precession matrix for the three precession angles \n zeta, z and theta.\n Rotation matrix: R = rotZ(-z).rotY(th).rotZ(-zeta) (ES 3.21-7, p 103)\n Also allowed is the expression: rotZ(-90-z)*rotX(th)*rotZ(90-zeta)\n---------------------------------------------------------------------- \n \"\"\"\n return rotZ(-z)*rotY(theta)*rotZ(-zeta)\n\n\n\n\ndef IAU2006MatrixEpoch12Epoch2(epoch1, epoch2):\n#----------------------------------------------------------------------\n \"\"\"\nCreate a rotation matrix for a precession based on \nIAU 2000/2006 expressions, see function :func:`IAU2006precangles`\n\n:param epoch1:\n Julian start epoch\n:type epoch1:\n Floating point number\n:param epoch2:\n Julian epoch to precess to.\n:type epoch2:\n Floating point number\n\n:Returns: \n Matrix to transform equatorial coordinates from epoch1 to \n epoch2 as in XYZepoch2 = M * XYZepoch1\n\n:Reference:\n Capitaine N. et al.: IAU 2000 precession A&A 412, 567-586 (2003)\n\n:Notes: \n Note that we apply this precession only to equatorial\n coordinates in the system of dynamical J2000 coordinates.\n When converting from ICRS coordinates this means applying \n a frame bias. \n Therefore the angles differ from the precession \n Fukushima-Williams angles (IAU 2006)\n \n The precession matrix is::\n \n M = rotZ(-z).rotY(+theta).rotZ(-zeta)\n \"\"\" \n#----------------------------------------------------------------------\n if (epoch1 == epoch2):\n return I()\n if epoch1 == 2000.0:\n zeta, z, theta = IAU2006precangles(epoch2)\n return precessionmatrix(zeta, z, theta)\n elif epoch2 == 2000.0:\n zeta, z, theta = IAU2006precangles(epoch1)\n return (precessionmatrix(zeta, z, theta)).T\n else:\n # If both epochs are not J2000.0\n zeta, z, theta = IAU2006precangles(epoch1)\n M1 = (precessionmatrix(zeta, z, theta)).T\n zeta, z, theta = IAU2006precangles(epoch2)\n M2 = precessionmatrix(zeta, z, theta)\n return M2*M1\n\n\n\ndef BMatrixEpoch12Epoch2(Bepoch1, Bepoch2):\n#----------------------------------------------------------------------\n \"\"\"\nPrecession from one epoch to another in the fk4 system.\nIt uses :func:`Newcombprecangles` to calculate the \nprecession angles.\n\n\n:param Bepoch1:\n Besselian start epoch\n:type Bepoch1:\n Floating point number\n:param Bepoch2:\n Besselian epoch to precess to.\n:type Bepoch2:\n Floating point number\n\n:Returns: \n 3x3 rotation matrix M as in XYZepoch2 = M * XYZepoch1\n\n:Reference: \n Seidelman, P.K., 1992. Explanatory Supplement to the Astronomical\n Almanac. University Science Books, Mill Valley. 
3.214 p 106\n\n:Notes: \n The precession matrix is::\n \n M = rotZ(-z).rotY(+theta).rotZ(-zeta)\n \n \"\"\"\n#----------------------------------------------------------------------\n zeta, z, theta = Newcombprecangles(Bepoch1, Bepoch2)\n return precessionmatrix(zeta, z, theta)\n\n\n\ndef JMatrixEpoch12Epoch2(Jepoch1, Jepoch2):\n#----------------------------------------------------------------------\n \"\"\"\nPrecession from one epoch to another in the fk5 system.\nIt uses :func:`Lieskeprecangles` to calculate the \nprecession angles.\n\n:param Jepoch1:\n Julian start epoch\n:type Jepoch1:\n Floating point number\n:param Jepoch2:\n Julian epoch to precess to.\n:type Jepoch2:\n Floating point number\n\n:Returns: \n 3x3 rotation matrix M as in XYZepoch2 = M * XYZepoch1\n\n:Reference: \n Seidelman, P.K., 1992. Explanatory Supplement to the Astronomical\n Almanac. University Science Books, Mill Valley. 3.214 p 106\n\n:Notes: \n The precession matrix is::\n \n M = rotZ(-z).rotY(+theta).rotZ(-zeta)\n\n \"\"\"\n#----------------------------------------------------------------------\n jd1 = epochJulian2JD(Jepoch1)\n jd2 = epochJulian2JD(Jepoch2)\n zeta, z, theta = Lieskeprecangles(jd1, jd2)\n return precessionmatrix(zeta, z, theta) \n\n\n\n\ndef FK42FK5Matrix(t=None):\n#----------------------------------------------------------------------\n \"\"\"\nCreate a matrix to precess from B1950 in FK4 to J2000 in FK5 \nfollowing to Murray's (1989) procedure.\n\n:param t:\n Besselian epoch as epoch of observation.\n:type t:\n Floating point number\n \n:Returns: \n 3x3 matrix M as in XYZfk5 = M * XYZfk4\n\n:Reference: \n * Murray, C.A. The Transformation of coordinates between the \n systems B1950.0 and J2000.0, and the principal galactic axis \n referred to J2000.0, \n Astronomy and Astrophysics (ISSN 0004-6361), vol. 218, no. 1-2, \n July 1989, p. 325-329.\n * Poppe P.C.R.,, Martin, V.A.F., Sobre as Bases de Referencia Celeste\n SitientibusSerie Ciencias Fisicas\n\n:Notes: \n Murray precesses from B1950 to J2000 using a precession matrix\n by Lieske. Then applies the equinox correction and ends up with a\n transformation matrix *X(0)* as given in this function.\n\n In Murray's article it is proven that using the procedure as\n described in the article, ``r_fk5 = X(0).r_fk4`` for extra galactic\n sources where we assumed that the proper motion in FK5 is zero.\n This procedure is independent of the epoch of observation.\n Note that the matrix is not a rotation matrix.\n\n FK4 is not an inertial coordinate frame (because of the error\n in precession and the motion of the equinox. This has \n consequences for the proper motions. e.g. a source with zero\n proper motion in FK5 has a fictitious proper motion in FK4.\n This affects the actual positions in a way that the correction\n is bigger if the epoch of observation is further away from 1950.0\n The focus of this library is on data of which we do not have\n information about the proper motions. 
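(Illustrative remark: ``FK42FK5Matrix()`` without an argument returns the constant\n    Murray matrix X(0); supplying an epoch of observation, e.g. ``FK42FK5Matrix(1970.0)``,\n    adds the small time-dependent terms, cf. the 1970/1980 difference in the example\n    below. ``FK52FK4Matrix(t)`` is simply its matrix inverse (``.I``), so the product\n    of the two is the identity to rounding error.)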
So for positions of which\n we allow non zero proper motion in FK5 one needs to supply the\n epoch of observation.\n \n:Examples: \n Print the difference between the rotation matrix for 1970 and \n 1980:\n \n >>> M1 = celestial.FK42FK5Matrix(1970)\n >>> M2 = celestial.FK42FK5Matrix(1980)\n >>> M2 - M1\n matrix([[ -2.64546940e-10, -1.15396722e-07, 2.11108953e-07],\n [ 1.15403817e-07, -1.29040234e-09, 2.36016437e-09],\n [ -2.11125281e-07, -5.60232514e-10, 1.02585540e-09]])\n\n \n \"\"\"\n#----------------------------------------------------------------------\n r11 = 0.9999256794956877; r12 = -0.0111814832204662; r13 = -0.0048590038153592\n r21 = 0.0111814832391717; r22 = 0.9999374848933135; r23 = -0.0000271625947142\n r31 = 0.0048590037723143; r32 = -0.0000271702937440; r33 = 0.9999881946023742\n \n if t != None: # i.e. we also assuming that v != 0 in FK5 !!\n jd = epochBessel2JD(t)\n T = (jd-2433282.423)/36525.0 # t-1950 in Julian centuries = F^-1.t1 from Murray (1989)\n r11 += -0.0026455262*T/1000000.0\n r12 += -1.1539918689*T/1000000.0\n r13 += 2.1111346190*T/1000000.0\n r21 += 1.1540628161*T/1000000.0\n r22 += -0.0129042997*T/1000000.0\n r23 += 0.0236021478*T/1000000.0\n r31 += -2.1112979048*T/1000000.0\n r32 += -0.0056024448*T/1000000.0\n r33 += 0.0102587734*T/1000000.0\n return n.matrix( ([r11,r12,r13],[r21,r22,r23],[r31,r32,r33]) )\n\n\n\ndef FK42FK5MatrixAOKI():\n \"\"\"\n----------------------------------------------------------------------\nExperimental.\nCreate matrix to precess from B1950 in FK4 to J2000 in FK5\nThe method is described in section 3.59 of the ES. \nProper motions are not taken into account. Parallax and radial velocity\nare set to zero and not taken into account.\nWe do not repeat the procedures here, but copy part of the matrix from \nES, 3.591-4, p 185\nSee also reference below:\nAuthor(s): Aoki, S., Soma, M., Kinoshita, H., Inoue, K.\nTitle:\t Conversion matrix of epoch B 1950.0 FK4-based positions of \n stars to epoch J 2000.0 positions in accordance with \n the new IAU resolutions\nSource:\t Astron. Astrophys. 128, 263-267\nYear:\t 1983\n\nThe matrix in the Yallop (1989) article has more digits than the\nmatrix from the ES.\nYallop, B.D. et al, 1989. \"Transformation of mean star places\nfrom FK4 B1950.0 to FK5 J2000.0 using matrices in 6-space\".\nAstron.J. 97, 274.\n----------------------------------------------------------------------\n \"\"\"\n r0 = [0.999925678186902, -0.011182059642247, -0.004857946558960]\n r1 = [0.011182059571766, 0.999937478448132, -0.00002717441185]\n r3 = [0.004857946721186, -0.000027147426498, 0.999988199738770]\n return n.matrix( (r0,r1,r3) )\n\n\n\ndef FK42FK5MatrixLOWPREC():\n \"\"\"\n----------------------------------------------------------------------\nExperimental.\nCreate matrix to precess from B1950 in FK4 to J2000 in FK5\nThe method is described in section 3.59 of the ES. \nProper motions are not taken into account. Parallax and radial velocity\nare set to zero and not taken into account.\nWe do not repeat the procedures here, but copy part of the matrix from \nES, 3.591-4, p 185\nSee also reference below:\nAuthor(s): Aoki, S., Soma, M., Kinoshita, H., Inoue, K.\nTitle:\t Conversion matrix of epoch B 1950.0 FK4-based positions of \n stars to epoch J 2000.0 positions in accordance with \n the new IAU resolutions\nSource:\t Astron. Astrophys. 
128, 263-267\nYear:\t 1983\n----------------------------------------------------------------------\n \"\"\"\n\n r0 = [0.9999256782, -0.0111820611, -0.0048579477]\n r1 = [0.0111820610, 0.9999374784, -0.0000271765]\n r3 = [0.0048579479, -0.0000271474, 0.9999881997]\n return n.matrix( (r0,r1,r3) )\n\n\n\n\ndef FK52FK4Matrix(t=None):\n \"\"\"\n----------------------------------------------------------------------\nPurpose: Create a matrix to convert a position in fk5 to fk4 using \n the inverse matrix FK42FK5Matrix\nInput: Epoch of observation for those situations where we allow\n no-zero proper motion in fk4\nReturns: Rotation matrix M as in XYZfk5 = M * XYZfk4\nNotes: For this matrix we know that the inverse is not the\n transpose.\n----------------------------------------------------------------------\n \"\"\"\n return FK42FK5Matrix(t).I\n\n\n\ndef FK42FK5MatrixOLDATTEMPT():\n \"\"\"\n----------------------------------------------------------------------\nExperimental.\nCreate matrix to precess from an epoch in FK4 to an epoch in FK5\nSo epoch1 is Besselian and epoch2 is Julian\n1) Do an epoch transformation in FK4 from input epoch to \n 1984 January 1d 0h\n2) Apply a zero point correction for the right ascension\n w.r.t. B1950. The formula is:\n E = E0 + E1*(jd-jd1950)/Cb\n E0 = 0.525; E1 = 1.275 and Cb = the length of the tropical \n century (ES 3.59 p 182) = 36524.21987817305\n For the correction at 1984,1,1 the ES lists 0.06390s which is\n 0.06390*15=0.9585\"\n This function calculated E = 0.958494476885\" which agrees with the \n literature.\n3) Transform in FK5 from 1984 January 1d 0h to epoch2\n\nNote that we do not use the adopted values for the precession angles, \nbut use the Woolward and Clemence expressions to calculate the angles.\nThese are one digit more accurate than the adopted values.\n----------------------------------------------------------------------\n \"\"\"\n # Epoch transformation from B1950 to 1984, 1,1 in FK4\n jd = JD(1984,1,1)\n epoch1984 = JD2epochBessel(jd)\n M1 = BMatrixEpoch12Epoch2(1950.0, epoch1984)\n\n # Equinox correction to the right ascension\n jd1950 = epochBessel2JD(1950.0)\n E0 = 0.525; E1 = 1.275\n Cb = 36524.21987817305 # In days = length of the tropical century\n E = E0 + E1*(jd-jd1950)/Cb\n\n E /= 3600.0 # From seconds of arc to degree\n M2 = rotZ(-E) # The correction is positive so we have to rotate\n # around the z-axis in the negative direction.\n # Epoch transformation from 1984,1,1 to J2000\n epoch1984 = JD2epochJulian(jd)\n M3 = JMatrixEpoch12Epoch2(epoch1984, 2000.0)\n\n return M3*M2*M1\n\n\n\ndef addpropermotion(xyz):\n \"\"\"\n----------------------------------------------------------------------\nExperimental.\nInput is a Cartesian position xyz.\nReturn a new position where the input position is corrected for \nassumed proper motion in the FK4 system.\nFor convenience we assume the epoch of observation is 1950\n----------------------------------------------------------------------\n \"\"\"\n twopi=6.283185307179586476925287\n pmf = 100.0*60*60*360/twopi\n\n d = 1950.0\n mjd = 15019.81352 + (d-1900)*365.242198781 # Convert to Modified Julian date\n Julianepoch = 2000.0 + (mjd-51544.5)/365.25 # Convert this mjd to Julian epoch\n w = (Julianepoch-2000.0)/pmf # Correction factor\n\n xyzpm = xyz.copy()\n r0 = [-0.000551, +0.238514, -0.435623] # Matrix from the ES.\n r1 = [-0.238565, -0.002667, +0.012254]\n r2 = [+0.435739, -0.008541, +0.002117]\n M = n.matrix( (r0,r1,r2) )\n\n for i in range(xyz.shape[1]): # Loop over all vectors\n p = 
n.array([xyz[0,i], xyz[1,i], xyz[2,i]]).T\n v = p.copy()\n v[0] = r0[0]*p[0]+r0[1]*p[1]+r0[2]*p[2]\n v[1] = r1[0]*p[0]+r1[1]*p[1]+r1[2]*p[2]\n v[2] = r2[0]*p[0]+r2[1]*p[1]+r2[2]*p[2]\n for j in range(2):\n xyzpm[j,i] = p[j] + w * v[j]\n return xyzpm\n\n\n\ndef EquinoxCorrection():\n \"\"\"\n----------------------------------------------------------------------\nExperimental.\nPurpose: Calculate the equinox correction according to Murray \n----------------------------------------------------------------------\n \"\"\"\n F = 1.000021359027778 #Converts the rate of change of Newcomb's precession from tropical centuries to Julian centuries.\n jd1 = epochs('B1950.0')[2]\n jd2 = epochs('J2000.0')[2]\n juliancenturies = (jd2-jd1) / 36525.0 # 1 Julian century is 36525 days\n E0 = 0.525; E1 = 1.275\n E = E0 + 0.0 * juliancenturies * F\n # print \", Juliancenturies, Juliancenturies-0.500002095577002 (Murray)\", juliancenturies, juliancenturies-0.500002095577002\n E /= 3600.0 # From seconds of arc to degree\n M = rotZ(-E) # The correction is positive so we have to rotate\n # around the z-axis in the negative direction.\n return M\n\n\n\ndef ICRS2FK5Matrix():\n#----------------------------------------------------------------------\n \"\"\"\nCreate a rotation matrix to convert a position from ICRS to fk5, J2000\n\n:Parameters:\n None\n\n:Returns: \n 3x3 rotation matrix M as in XYZfk5 = M * XYZicrs\n\n:Reference: \n Kaplan G.H., The IAU Resolutions on Astronomical Reference \n systems, Time scales, and Earth Rotation Models, US Naval \n Observatory, Circular No. 179\n\n:Notes: \n Return a matrix that converts a position vector in ICRS\n to FK5, J2000.\n We do not use the first or second order approximations\n given in the reference, but use the three rotation matrices\n from the same paper to obtain the exact result::\n \n M = rotX(-eta0)*rotY(xi0)*rotZ(da0)\n\n eta0 = -19.9 mas, xi0 = 9.1 mas and da0 = -22.9 mas\n\n \"\"\"\n#----------------------------------------------------------------------\n eta0 = -19.9/(3600*1000) # Convert mas to degree\n xi0 = 9.1/(3600*1000)\n da0 = -22.9/(3600*1000)\n return rotX(-eta0)*rotY(xi0)*rotZ(da0)\n \n\n\ndef ICRS2J2000Matrix():\n#----------------------------------------------------------------------\n \"\"\"\nReturn a rotation matrix for conversion of a position in the \nICRS to the dynamical reference system based on the dynamical\nmean equator and equinox of J2000.0 (called the dynamical\nJ2000 system) \n\n:Parameters:\n None\n \n:Returns: \n Rotation matrix to transform positions from ICRS to dyn J2000\n\n:Reference: \n * Hilton and Hohenkerk (2004), Astronomy and Astrophysics \n 413, 765-770\n * Kaplan G.H., The IAU Resolutions on Astronomical Reference\n systems, Time scales, and Earth Rotation Models, \n US Naval Observatory, Circular No. 179\n\n:Notes: \n Return a matrix that converts a position vector in ICRS\n to Dyn. J2000. 
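(The frame bias is tiny: the three angles are of milliarcsecond order, so this matrix\n    differs from the identity by only a few times 1e-8; as an illustrative check, no\n    element of ``ICRS2J2000Matrix() - I()`` exceeds about 1e-7.)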
We do not use the first or second order\n approximations given in the reference, but use the three \n rotation matrices to obtain the exact result::\n\n M = rotX(-eta0)*rotY(xi0)*rotZ(da0)\n\n eta0 = -6.8192 mas, xi0 = -16.617 mas and da0 = -14.6 mas\n\n \"\"\"\n#----------------------------------------------------------------------\n eta0 = -6.8192/(3600*1000) # Convert mas to degree\n xi0 = -16.617/(3600*1000)\n da0 = -14.6/(3600*1000)\n return rotX(-eta0)*rotY(xi0)*rotZ(da0)\n\n\n\ndef MatrixEpoch12Epoch2(epoch1, epoch2, S1, S2, epobs=None):\n#----------------------------------------------------------------------\n \"\"\"\nHelper function for :func:`skymatrix`. It handles precession and\nthe transformation between **equatorial** systems. This function\nincludes also conversions between reference systems.\n\n:param epoch1:\n Epoch belonging to system S1 depending on the reference \n system either Besselian or Julian.\n:type epoch1:\n Floating point number\n:param epoch2:\n Epoch belonging to system S2 depending on the reference \n system either Besselian or Julian.\n:param S1:\n Input reference system\n:type S1:\n Integer\n:param S2:\n Output rreferencesystem\n:type S2:\n Integer\n:param epobs:\n Epoch of observation. Only valid for conversions between\n FK4 and FK5.\n:type epobs:\n Floating point number\n \n:Returns: \n Rotation matrix to transform a position in one of the \n reference systems *S1* with *epoch1* to an equatorial system \n with equator and equinox at *epoch2* in reference system *S2*.\n\n:Notes: \n Return matrix to transform equatorial coordinates from\n *epoch1* to *epoch2* in either reference system FK4 or FK5. \n Or transform from epoch, FK4 or FK5 to ICRS or J2000 vice versa.\n Note that each transformation between FK4 and one of the\n other reference systems involves a conversion to\n FK5 and therefore the epoch of observation will be involved.\n \n Note that if no systems are entered and the one\n epoch is > 1984 and the other < 1984, then the\n transformation involves both sky reference systems FK4\n and FK5.\n\n:Examples: \n Calculate rotation matrix for a conversion between FK4, epoch 1940\n to FK5, epoch 1960, while the date of observation was 1950.\n \n >>> from kapteyn import celestial\n >>> celestial.MatrixEpoch12Epoch2(1940, 1960, celestial.fk4, celestial.fk5, 1950)\n matrix([[ 9.99988107e-01, -4.47301372e-03, -1.94362889e-03],\n [ 4.47301372e-03, 9.99989996e-01, -4.34712255e-06],\n [ 1.94362889e-03, -4.34680782e-06, 9.99998111e-01]])\n\n \"\"\"\n#----------------------------------------------------------------------\n # note that if S1 or S2 is equal to ICRS, then corresponding epoch is irrelevant\n if (S1==fk5 and S2==fk5):\n return JMatrixEpoch12Epoch2(epoch1, epoch2)\n elif (S1==fk4 and S2==fk4):\n return BMatrixEpoch12Epoch2(epoch1, epoch2)\n elif (S1==fk4 and S2==fk5):\n M1 = BMatrixEpoch12Epoch2(epoch1, 1950.0)\n M2 = FK42FK5Matrix(epobs)\n M3 = JMatrixEpoch12Epoch2(2000.0, epoch2)\n return M3*M2*M1\n elif (S1==fk5 and S2==fk4):\n M1 = JMatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = FK52FK4Matrix(epobs)\n M3 = BMatrixEpoch12Epoch2(1950.0, epoch2)\n return M3*M2*M1\n elif (S1==icrs and S2==icrs):\n return I()\n elif (S1==icrs and S2==fk4):\n M1 = ICRS2FK5Matrix()\n M2 = FK52FK4Matrix(epobs)\n M3 = BMatrixEpoch12Epoch2(1950.0, epoch2)\n return M3*M2*M1\n elif (S1==icrs and S2==fk5):\n M1 = ICRS2FK5Matrix()\n M2 = JMatrixEpoch12Epoch2(2000.0, epoch2)\n return M2*M1\n elif (S1==fk5 and S2==icrs):\n M1 = JMatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = 
ICRS2FK5Matrix().T\n return M2*M1\n elif (S1==fk4 and S2==icrs):\n M1 = BMatrixEpoch12Epoch2(epoch1, 1950.0)\n M2 = FK42FK5Matrix(epobs)\n M3 = ICRS2FK5Matrix().T\n return M3*M2*M1\n elif (S1==j2000 and S2==j2000):\n M1 = IAU2006MatrixEpoch12Epoch2(epoch1, epoch2)\n return M1\n elif (S1==j2000 and S2==icrs):\n M1 = IAU2006MatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = ICRS2J2000Matrix().T\n return M2*M1\n elif (S1==j2000 and S2==fk5):\n M1 = IAU2006MatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = ICRS2J2000Matrix().T\n M3 = ICRS2FK5Matrix()\n M4 = JMatrixEpoch12Epoch2(2000.0, epoch2)\n return M4*M3*M2*M1\n elif (S1==j2000 and S2==fk4):\n M1 = IAU2006MatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = ICRS2J2000Matrix().T\n M3 = ICRS2FK5Matrix()\n M4 = FK52FK4Matrix(epobs)\n M5 = BMatrixEpoch12Epoch2(1950.0, epoch2)\n return M5*M4*M3*M2*M1\n elif (S1==icrs and S2==j2000):\n M1 = ICRS2J2000Matrix()\n M2 = IAU2006MatrixEpoch12Epoch2(2000.0, epoch2)\n return M2*M1\n elif (S1==fk5 and S2==j2000):\n M1 = JMatrixEpoch12Epoch2(epoch1, 2000.0)\n M2 = ICRS2FK5Matrix().T\n M3 = ICRS2J2000Matrix()\n M4 = IAU2006MatrixEpoch12Epoch2(2000.0, epoch2)\n return M4*M3*M2*M1\n elif (S1==fk4 and S2==j2000):\n M1 = BMatrixEpoch12Epoch2(epoch1, 1950.0)\n M2 = FK52FK4Matrix(epobs).T\n M3 = ICRS2FK5Matrix().T\n M4 = ICRS2J2000Matrix()\n M5 = IAU2006MatrixEpoch12Epoch2(2000.0, epoch2)\n return M5*M4*M3*M2*M1\n else:\n mes = \"Unknown celestial reference system: %s or %s\" % (S1, S2) \n raise Exception(mes)\n\n\n\ndef rotmatrix(skyin, skyout, epoch1=2000.0, epoch2=2000.0, S1=fk5, S2=fk5, epobs=None):\n \"\"\"\n----------------------------------------------------------------------\nPurpose: Calculate and return the wanted rotation matrix.\nInput: A complete specification of input and output sky systems.\n The sky systems are equatorial, ecliptic, galactic, supergalactic\n which are represented by numbers 0,1,2 and 3\n The reference systems are fk4, fk4_no_e, fk5, icrs, j2000\n which are represented by the numbers.\nReturns: Transformation matrix as in XYZout = M * XYZin\nReference: -\n---------------------------------------------------------------------\n \"\"\"\n if skyin == equatorial:\n if skyout == equatorial:\n M1 = MatrixEpoch12Epoch2(epoch1, epoch2, S1, S2, epobs) # eq -> eq epoch1 to epoch2\n return M1\n if skyout == ecliptic:\n M1 = MatrixEpoch12Epoch2(epoch1, epoch2, S1, S2)\n M2 = MatrixEq2Ecl(epoch2, S2)\n return M2*M1\n if skyout == galactic: # eq(epoch1) -> galactic\n M1 = MatrixEpoch12Epoch2(epoch1, 1950.0, S1, fk4)\n M2 = MatrixEqB19502Gal()\n return M2*M1\n if skyout == supergalactic: # eq(epoch1) -> super galactic\n M1 = MatrixEpoch12Epoch2(epoch1, 1950.0, S1, fk4)\n M2 = MatrixEqB19502Gal()\n M3 = MatrixGal2Sgal()\n return M3*M2*M1\n else:\n mes = \"Unknown output sky system: %s\" % (S2,)\n raise Exception(mes)\n\n elif skyin == ecliptic:\n if skyout == equatorial:\n M1 = MatrixEq2Ecl(epoch1, S1).T # S2 sets epoch to Besselian or Julian\n M2 = MatrixEpoch12Epoch2(epoch1, epoch2, S1, S2)\n return M2*M1\n if skyout == ecliptic: # ecl -> ecl epoch1 to epoch2\n # This is an epoch transformation only\n M1 = MatrixEq2Ecl(epoch1, S1).T # to eq(epoch1)\n M2 = MatrixEpoch12Epoch2(epoch1, epoch2, S1, S2) # epoch1 to epoch2\n M3 = MatrixEq2Ecl(epoch2, S2) # return to ecl(epoch2)\n return M3*M2*M1\n if skyout == galactic: # ecl(epoch1) -> galactic\n M1 = MatrixEq2Ecl(epoch1, S1).T # to eq(epoch1)\n M2 = MatrixEpoch12Epoch2(epoch1, 1950.0, S1, fk4) # to eq(2000.0)\n M3 = MatrixEqB19502Gal() # eq(2000) to gal\n return M3*M2*M1\n 
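# The supergalactic branch below follows the same route as the galactic\n        # branch above (ecl -> eq(epoch1) -> fk4 B1950 -> galactic) and appends\n        # one extra rotation, MatrixGal2Sgal(), from galactic to supergalactic.\n        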
if skyout == supergalactic: # ecl(epoch1) -> super galactic\n M1 = MatrixEq2Ecl(epoch1, S1).T # to eq(epoch1)\n M2 = MatrixEpoch12Epoch2(epoch1, 1950.0, S1, fk4) # to eq(2000.0)\n M3 = MatrixEqB19502Gal() # eq(2000) to gal\n M4 = MatrixGal2Sgal() # gal to sgal\n return M4*M3*M2*M1\n else:\n mes = \"Unknown output sky system: %s\" % (S2,)\n raise Exception(mes)\n\n elif skyin == galactic:\n if skyout == equatorial: # gal -> eq, epoch2\n M1 = MatrixEqB19502Gal().T # gal to fk4 B1950\n M2 = MatrixEpoch12Epoch2(1950.0, epoch2, fk4, S2) # fk4 B1950 to eq, epoch2\n return M2*M1\n if skyout == ecliptic: # gal -> ecl(epoch1)\n M1 = MatrixEqB19502Gal().T # gal to fk4 B1950\n M2 = MatrixEpoch12Epoch2(1950.0, epoch2, fk4, S2) # fk4 B1950 to fk5 any epoch,equinox\n M3 = MatrixEq2Ecl(epoch2, S2) # eq(epoch2) to ecl(epoch2)\n return M3*M2*M1\n if skyout == galactic: # gal -> gal\n return I()\n if skyout == supergalactic: # gal -> sgal\n M1 = MatrixGal2Sgal()\n return M1\n else:\n mes = \"Unknown output sky system: %s\" % (S2,)\n raise Exception(mes)\n\n elif skyin == supergalactic:\n if skyout == equatorial: # sgal -> eq(epoch2)\n M1 = MatrixGal2Sgal().T # sgal to gal\n M2 = MatrixEqB19502Gal().T # gal to eq(2000)\n M3 = MatrixEpoch12Epoch2(1950.0, epoch2, fk4, S2) # epoch 2000 to epoch2\n return M3*M2*M1\n if skyout == ecliptic: # sgal -> ecl(epoch2)\n M1 = MatrixGal2Sgal().T # sgal to gal\n M2 = MatrixEqB19502Gal().T # gal to eq(2000)\n M3 = MatrixEpoch12Epoch2(1950.0, epoch2, fk4, S2) # 1950 to epoch2\n M4 = MatrixEq2Ecl(epoch2, S2) # eq(epoch2) to ecl(epoch2)\n return M4*M3*M2*M1\n if skyout == galactic: # sgal -> gal\n M1 = MatrixGal2Sgal().T\n return M1\n if skyout == supergalactic: # sgal -> sgal\n return I()\n else:\n mes = \"Unknown output sky system: %s\" % (S2,)\n raise Exception(mes)\n else:\n mes = \"Unknown input sky system: %s\" % (S1,)\n raise Exception(mes)\n\n\n\ndef skyparser(skyin):\n#----------------------------------------------------------------------\n \"\"\"\nParse a string, tuple or single integer that represents a sky definition.\nA sky definition can consist of a *sky system*,\na *reference system*, an *equinox* and an *epoch of\nobservation*.\nSee also the description at :ref:`celestial-skydefinitions`.\nThe elements in the string are separated by\na comma or a space. The order of the elements is not important.\nThe string is converted to a tuple by :func:`celestial.parseskydefs`.\n\nThe parser is used in function :func:`celestial.skymatrix`\nand :func:`celestial.sky2sky`. External applications can use this function\nto check whether user input is valid.\n\nDefinitions in strings are usually used to define output sky definitions\nin prompts or on command lines. Applications can use integer id's\nfor the sky- and reference systems. These integer id's are global constants\nSee also :ref:`celestial-skysystems` and :ref:`celestial-refsystems`.\n \nThe sky system and reference system strings are minimal matched\n(case INsensitive) with the strings in the table\nin the documentation at :ref:`celestial-skysystems` and :ref:`celestial-refsystems`.\n\nFor the epoch syntax read the documentation at :ref:`celestial-epochs`.\nNote that an epoch of observation is either a second epoch in the string\n(the first is always the equinox) or the epoch string has\na suffix '_' which may be follwed by arbitrary characters.\n\n:param skyin:\n Represents a sky definition. 
See examples.\n:type skyin:\n String, tuple or integer\n \n:Returns:\n A tuple with the 'coded' system where strings for sky- and reference systems\n are replaced by integer id's. Missing values are filled in with defaults.\n\n If an error occurred then an exception will be raised. \n\n:raises:\n :exc:`ValueError`\n From :func:`celestial.parseskydefs`:\n \n * *Empty string!*\n * *Too many items for sky definition!*\n * *... is ambiguous sky or reference system!*\n * *... is not a valid epoch or sky/ref system!*\n\n From this function:\n\n * *Sky definition is not a string nor a tuple!*\n * *Too many elements in sky definition (max. 4)!*\n * *Two sky systems given!*\n * *Two reference systems given!*\n * *Invalid number for sky- or reference system!*\n * *Cannot determine the sky system!*\n * *Input contains an element that is not an integer or a string!*\n\n:Examples: \n \n >>> print celestial.skyparser(\"B1983.5_O fk4 B1960,eq\")\n (0, 4, 1960.0, 1983.5)\n\n >>> print celestial.skyparser(\"su\")\n (3, None, None, None)\n\n >>> print celestial.skyparser(\"supergal\")\n (3, None, None, None)\n\n \n \n:Notes:\n This is the parser for a sky definition.\n In this definition one can specify the sky system,\n the reference system, an equinox and an epoch of\n observation if the reference system is fk4.\n The order of these elements is not important.\n\n The rules for the defaults are:\n\n * What if the sky system is not defined? If there is a reference\n system then we assume it is equatorial (could have been ecliptic).\n * If there no sky system and no reference system but there is\n an equinox, assume sky system is equatorial (could have been ecliptic).\n * If there no sky system and no reference system and no\n equinox but there is an epoch of observation,\n assume sky system is equatorial.\n * Assume we have a sky system. What if there is no reference system?\n Standard in FITS: RADESYS (i.e our reference system) defaults to\n IRCS unless EQUINOX is given alone,\n in which case it defaults to FK4 prior to 1984 and FK5 after 1984.\n * Assume we have a sky system and a reference system and the sky system was\n ecliptic or equatorial. What if we don't have an equinox?\n Standard in FITS: EQUINOX defaults to 2000 unless RADESYS is FK4,\n in which case it defaults to 1950.\n * We have one item to address and that is the epoch of observation.\n This epoch of observation only applies to the reference systems FK4\n and FK4_NO_E.\n In 'Representations of celestial coordinates in FITS' (Calabretta & Greisen)\n we read that all reference systems are allowed for both equatorial- and\n ecliptic coordinates, except FK4-NO-E, which is only allowed for equatorial\n coordinates. If FK4-NO-E is given in combination with an ecliptic\n sky system then silently FK4 is assumed.\n \"\"\"\n#----------------------------------------------------------------------\n epochin = None\n epochinset = None\n refin = None\n epobs = None\n sysin = None\n first = True\n\n if skyin == None: # Nothing to parse\n return sysin, refin, epochin, epobs\n\n if not isinstance(skyin, six.string_types + (tuple,)):\n try:\n skyin = tuple([skyin])\n except:\n raise ValueError(\"Sky definition is not a string nor a tuple or a scalar!\")\n if isinstance(skyin, six.string_types):\n skyin = parseskydef(skyin)\n if skyin is None: # e.g. input was '{}' then parseskydef returns None\n return None, None, None, None\n if len(skyin) > 4:\n raise ValueError(\"Too many elements in sky definition (max. 
4)!\")\n\n # Parse the tuple into a sky system, a reference system, equinox and obs epoch\n for element in skyin:\n if type(element) == int:\n s = skyrefsystems.id2skyref(element)\n if s != None:\n if s.refsystem:\n if refin == None:\n refin = element\n else:\n raise ValueError(\"Two sky systems given!\")\n else:\n if sysin == None:\n sysin = element\n else:\n raise ValueError(\"Two reference systems given!\")\n else:\n raise ValueError(\"Invalid number for sky- or reference system!\")\n elif isinstance(element, six.string_types):\n if first and element.find('_') == -1: # i.e. it is not an obs epoch\n epochinset = epochs(element)\n first = False\n else:\n # Could be obs. epoch if underscore in string or it is the second epoch\n epobs = epochs(element)[0] # Always in Besselian data\n elif element != None:\n raise ValueError(\"Input contains an element that is not an integer or a string!\")\n #------------------------------------------------------------\n # At this stage we have\n # sysin (sky system): integer or None\n # refin (ref. system): integer or None\n # epochinset (equinox): (B, J, JD) or None\n # epobs (epoch of observation): Besselian epoch or None\n #------------------------------------------------------------\n\n\n # Here we start to fill in the missing parts.\n # Most defaults are defined in the FITS standard. Others are the\n # most sensible. If essential parts are missing then an exception\n # will be raised.\n\n # What if the sky system is not defined? If there is a reference\n # system then we assume it is equatorial.\n if sysin == None:\n if refin != None:\n sysin = eq # But this could also be ecliptic (except for fk4_no_e)\n else:\n if epochinset != None: # No ref sys but an equinox: assume equatorial\n sysin = eq\n elif epobs != None:\n sysin = eq\n else:\n raise ValueError(\"Cannot determine the sky system!\")\n\n # Now we have a sky system. What if there is no reference system?\n # Standard in FITS: RADESYS defaults to IRCS unless EQUINOX is given alone, \n # in which case it defaults to FK4 prior to 1984 and FK5 after 1984.\n if sysin in [eq, ecl]:\n if refin == None:\n refin = icrs\n if epochinset != None:\n jd = epochinset[2]\n if jd < epochJulian2JD(1984.0):\n epochin = JD2epochBessel(jd) # Always Besselian even if epoch was specified as Julian\n else:\n epochin = JD2epochJulian(jd)\n if epochin < 1984.0:\n refin = fk4 # Dangerous default. Could also be fk4_no_e for radio data\n else:\n refin = fk5\n elif sysin == eq and epobs != None:\n # If there is no reference system and there is no equinox\n # but there is an epoch of observation, then the reference is\n # fk4\n refin = fk4\n else:\n # Other sky systems do not have a reference system\n refin = None\n\n # We have a sky system and a reference system if the sky system was\n # ecliptic or equatorial. What if we don't have an equinox?\n # FITS: EQUINOX defaults to 2000 unless RADESYS is FK4, in which case\n # it defaults to 1950.\n if sysin in [eq, ecl]:\n if epochinset == None:\n if refin == fk4 or refin == fk4_no_e: # The ref. system belongs to the fk4 family, \n epochin = 1950.0\n else:\n epochin = 2000.0\n else:\n if refin == fk4 or refin == fk4_no_e:\n epochin = epochinset[0] # Besselian epoch\n else:\n epochin = epochinset[1] # Julian epoch\n\n # We have one item to address and that is the epoch of observation.\n # In 'Representations of celestial coordinates in FITS' (Calabretta & Greisen)\n # we read that all reference systems are allowed for both equatorial- and\n # ecliptic coordinates. 
Except FK4-NO-E which is only allowed for equatorial\n # coordinates!\n # This seems to contradict the fact that we must convert from fk4 to ecliptic\n # via fk4-no-e and therefore the actual reference system is fk4-no-e\n if not ((sysin == eq or sysin == ecl) and (refin == fk4 or refin == fk4_no_e)):\n epobs = None\n\n return sysin, refin, epochin, epobs\n\n\n\ndef parseskydef(skydef_in):\n#----------------------------------------------------------------------\n \"\"\"\nParse a string that represents a sky definition.\nSee documentation at function skyparser()\nA tuple with values is returned. If the sky system was empty as\nin {}, then return None\n \"\"\"\n#----------------------------------------------------------------------\n if skydef_in == '':\n raise Exception('Empty string!')\n\n bs = skydef_in.startswith('{')\n be = skydef_in.endswith('}')\n if bs and not be:\n raise ValueError(\"Definition starts with '{' but does not end with '}'\")\n if be and not bs:\n raise ValueError(\"Definition ends with '}' but does not start with '{'\")\n if bs and be:\n skydef = skydef_in[1:-1] # Remove braces\n if len(skydef.strip()) == 0: # Empty sky def. {}\n return None \n else:\n skydef = skydef_in\n\n tokens = re_split('[,\\s]+', skydef.strip()) # Split on whitespace and comma\n if len(tokens) > 4: # sky, ref, equinox, dateobs\n raise ValueError(\"Too many items for sky definition!\")\n\n sky = []\n for t in tokens:\n t.strip()\n errmes = ''\n s, found = skyrefsystems.minmatch2skyref(t) # 'skyrefs' is global list\n if s != None:\n if found > 1:\n errmes = \"%s is ambiguous sky or reference system!\" % t\n raise ValueError(errmes)\n else:\n sky.append(s.idnum)\n else:\n try:\n B, J, JD = epochs(t)\n sky.append(t)\n except:\n errmes = \"%s is not a valid epoch or sky/ref system!\" % t\n raise ValueError(errmes)\n return tuple(sky)\n\n\ndef isparsed(skytuple):\n #----------------------------------------------------------------------\n \"\"\"\n A sky definition after parsing is a tuple with 4 elements.\n None of these is a string. 
The first one is a number and\n the others are either a number or are equal to None.\n \"\"\"\n #----------------------------------------------------------------------\n if type(skytuple) == tuple and len(skytuple) == 4 and\\\n type(skytuple[0]) == int and\\\n type(skytuple[1]) != bytes and\\\n type(skytuple[2]) != bytes and\\\n type(skytuple[3]) != bytes:\n return True\n return False\n\n\ndef skymatrix(skyin, skyout):\n#----------------------------------------------------------------------\n \"\"\"\nCreate a transformation matrix to be used to transform a position from\none sky system to another (including epoch transformations).\nFor a description of the sky definitions see :ref:`celestial-skydefinitions`.\n\n:param skyin:\n One of the supported sky systems or a tuple for equatorial systems\n which are identified with an equinox an reference system.\n This is the sky system from which you want to transform to\n another sky system (*skyout*).\n:type skyin:\n Integer or tuple with one to four elements\n:param skyout:\n The destination sky system\n:type skyin:\n Integer or tuple with one to four elements\n \n\n:Returns: Three elements:\n \n * The transformation matrix *M* for the transformation\n of positions in (x,y,z) as in *XYZskyout = M * XYZskyin*\n * followed by 'None' or a tuple with the e-term vector belonging\n input epoch.\n * followed by *None* or a tuple with the e-term vector belonging\n to the output epoch.\n \n See also notes below.\n \n:Notes:\n The reference systems FK4 and FK4_NO_E are special. We\n consider FK4 as a catalog position where the **e-terms** are\n included. So besides a transformation matrix, this function\n should also return a flag for the addition or removal of\n e-terms. This flag is either *None* or the e-term vector\n which depends on the epoch.\n \n The structure of the output then is as follows:\n ``M, (A1,A2,A3), (A4,A5,A6)``\n where:\n \n * *M*: The 3x3 transformation matrix\n * *(A1,A2,A3)* or *None*: for adding or removing e-terms\n in the input sky system using this e-term vector *(A1,A2,A3)*.\n * *(A4,A5,A6)* or *None*: for adding or removing e-terms\n in the output sky system using this e-term vector *(A4,A5,A6)*.\n\n This function is the main function of this module.\n It calls function *skyparser()* for the parsing of the input and\n *rotmatrix()* to get the rotation matrix.\n There utility function *sky2sky()* transforms a sequence\n of longitudes and latitudes from one sky system to another.\n It is a valuable tool for experiments in an interactive Python\n session.\n \n:Examples:\n Some examples of transformations between sky systems using either\n strings or tuples. We advise to use strings which is more safe\n then using variables from celestial (which can be accidentally\n replaced by other values). 
\n Note that for transformations where FK4 is involved,\n the matrix is followed by a vector with e-terms.\n \n >>> from kapteyn import celestial\n >>> print skymatrix(celestial.gal,(celestial.eq,\"j2000\",celestial.fk5))\n (matrix([[-0.05487554, 0.49410945, -0.86766614],\n [-0.8734371 , -0.44482959, -0.19807639],\n [-0.48383499, 0.74698225, 0.45598379]]),\n None,\n None)\n \n >>> print skymatrix(celestial.fk4, celestial.fk5)\n (matrix([[ 9.99925679e-01, -1.11814832e-02, -4.85900382e-03],\n [ 1.11814832e-02, 9.99937485e-01, -2.71625947e-05],\n [ 4.85900377e-03, -2.71702937e-05, 9.99988195e-01]]),\n (-1.6255503575995309e-06,\n -3.1918587795578522e-07,\n -1.3842701121066153e-07), None)\n \n >>> print skymatrix(\"eq,B1950.0,fk4_no_e\",\"eq,B1950.0,fk4\")\n (matrix([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]]),\n None,\n (-1.6255503575995309e-06,\n -3.1918587795578522e-07,\n -1.3842701121066153e-07))\n \n >>> print skymatrix(\"eq b1950 fk4 j1983.5\", \"eq J2000 fk5\")\n (matrix([[ 9.99925679e-01, -1.11818698e-02, -4.85829658e-03],\n [ 1.11818699e-02, 9.99937481e-01, -2.71546879e-05],\n [ 4.85829648e-03, -2.71721706e-05, 9.99988198e-01]]),\n (-1.6255503575995309e-06,\n -3.1918587795578522e-07,\n -1.3842701121066153e-07),\n None)\n \n >>> print skymatrix(\"eq J2000 fk4 F1984-1-1T0:30\", \"eq J2000 fk5\")\n (matrix([[ 1.00000000e+00, -5.45185721e-06, -3.39404820e-07],\n [ 5.45185723e-06, 1.00000000e+00, 2.24950276e-08],\n [ 3.39404701e-07, -2.24971595e-08, 1.00000000e+00]]),\n (-1.6181121582090453e-06,\n -3.4112123324131958e-07,\n -1.4789407828956555e-07),\n None)\n\n\n See :ref:`celestial-epochs` for the possible epoch formats.\n \"\"\"\n#---------------------------------------------------------------------\n if isparsed(skyin): # Then no need to parse again\n sysin, refin, epochin, epobsin = skyin\n else:\n sysin, refin, epochin, epobsin = skyparser(skyin)\n if isparsed(skyout):\n sysout, refout, epochout, epobsout = skyout\n else:\n sysout, refout, epochout, epobsout = skyparser(skyout)\n\n # Take care of the e-terms\n Aep1 = None; Aep2 = None\n if sysin == eq:\n if refin == fk4:\n # This is a catalog value. We should remove e-terms before we transform anything\n Aep1 = getEterms(epochin)\n if refin == fk4_no_e:\n refin = fk4\n if sysout == eq:\n if refout == fk4:\n Aep2 = getEterms(epochout)\n if refout == fk4_no_e:\n refout = fk4\n # No e-terms for ecliptic coordinates in fk4\n # If fk4-no-e was selected then use fk4\n if sysin == ecl:\n if refin == fk4_no_e:\n refin = fk4\n if sysout == ecl:\n if refout == fk4_no_e:\n refout = fk4\n\n epobs = None\n if refin == fk4 and epobsin != None:\n epobs = epobsin\n if refout == fk4 and epobsout != None:\n epobs = epobsout\n\n return rotmatrix(sysin, sysout, epochin, epochout, refin, refout, epobs), Aep1, Aep2\n\n\n\ndef dotrans(skytuple, xyz):\n \"\"\"\n----------------------------------------------------------------------\nPurpose: Utility function that performs the rotation and adding or\n removing e-terms\nInput: -The tuple as produced by skymatrix\n -one or more positions in Cartesian coordinates (xyz)\nReturns: The transformed (Cartesian) coordinates\nNotes: Function skymatrix returns a tuple with the rotation matrix\n and e-terms if necessary. Tuple element 0 is the rotation\n matrix. 
Function dotrans() does the rotation for a vector \n in Cartesian coordinates.\nExamples: >>> lonlat = n.array( [(lon,lat)] )\n >>> xyz = longlat2xyz(lonlat)\n >>> M = skymatrix((eq,fk4,'j1950','b1995.0'), (eq,'J2000',fk5))\n >>> xyz2 = dotrans(M , xyz)\n----------------------------------------------------------------------\n \"\"\"\n M, A1, A2 = skytuple\n if A1:\n xyz2 = removeEterms(xyz, A1)\n else:\n xyz2 = xyz\n xyz3 = M*xyz2\n if A2:\n xyz3 = addEterms(xyz3, A2)\n return xyz3\n\n\n\ndef sky2sky(skyin, skyout, lons, lats):\n#----------------------------------------------------------------------\n \"\"\"\nUtility function to facilitate command line use of skymatrix.\n\n:param skyin:\n The input sky definition\n:type skyin:\n See function :func:`skymatrix`\n:param skyout:\n The output sky definition\n:type skyout:\n See function :func:`skymatrix`\n:param lons:\n Input longitude(s)\n:type lons:\n Floating point number(s), scalar, list or tuple\n:param lats:\n Input latitude(s)\n:type lats:\n Floating point number(s), scalar, list or tuple\n\n:Returns:\n Matrix. One position per row. See example below how to\n extract rows, columns and elements from this matrix.\n\n:Example:\n Interactive Python session:\n\n >>> from kapteyn import celestial\n >>> M = celestial.sky2sky( (celestial.eq, celestial.fk5), celestial.gal,\n (0,0,1.0), (10,20,20) )\n >>> M\n matrix([[ 102.6262244 , -50.83256452],\n [ 106.78021643, -41.25289649],\n [ 107.9914125 , -41.49143448]])\n >>> M[2,0]\n 107.99141249678289\n >>> M[0] # Extract first transformed long, lat\n matrix([[ 102.6262244 , -50.83256452]])\n >>> M[:,1] # Extract second column with latitudes\n matrix([[-50.83256452],\n [-41.25289649],\n [-41.49143448]])\n\n:Notes:\n This function illustrates the core use of module *celestial*.\n First it converts the input of world coordinates into a matrix.\n This matrix is converted to spatial positions (X,Y,Z) with\n function *longlat2xyz()*. The function *dotrans()* transforms\n these positions (X,Y,Z) to positions (X2,Y2,Z2) in the output sky\n system. Then the function *xyz2longlat()* converts these positions\n into longitudes and latitudes and finally a matrix with these\n values is returned::\n \n lonlat = n.array( [(lons,lats)] )\n xyz = longlat2xyz(lonlat)\n xyz2 = dotrans(skymatrix(skyin, skyout), xyz)\n newlonlats = xyz2longlat(xyz2)\n return newlonlats\n\n \"\"\"\n#----------------------------------------------------------------------\n lonlat = n.array( [(lons,lats)] )\n xyz = longlat2xyz(lonlat)\n xyz2 = dotrans(skymatrix(skyin, skyout), xyz)\n newlonlats = xyz2longlat(xyz2)\n return newlonlats\n\n"
] |
[
[
"matplotlib.pylab.show",
"matplotlib.pylab.figure"
],
[
"numpy.dot",
"numpy.product",
"numpy.asarray",
"numpy.ascontiguousarray",
"numpy.iscomplexobj",
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.matrix",
"numpy.fmod",
"numpy.sqrt",
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.round",
"numpy.int",
"numpy.arctan2",
"numpy.identity",
"numpy.zeros",
"numpy.array",
"numpy.where",
"numpy.mat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
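The code cell in the row above documents `sky2sky()` as the convenience entry point for coordinate transformations. As a hedged sketch grounded in that docstring (the import path and the example positions are taken straight from it; nothing else is assumed):

>>> from kapteyn import celestial
>>> # Three FK5/J2000 equatorial positions -> galactic; one (lon, lat) pair per row.
>>> M = celestial.sky2sky((celestial.eq, celestial.fk5), celestial.gal,
...                       (0, 0, 1.0), (10, 20, 20))
>>> M[:, 1]    # second column: the transformed latitudes

Internally this is `longlat2xyz` -> `dotrans(skymatrix(skyin, skyout), xyz)` -> `xyz2longlat`, exactly as listed in the docstring's notes.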
forsubmissionanonymity/nips_2021_2271
|
[
"81a9eccb222738ccab1c540a87b701b0b9783ba3"
] |
[
"utils/utils_squad_evaluate.py"
] |
[
"\"\"\" Official evaluation script for SQuAD version 2.0.\r\n Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0\r\nIn addition to basic functionality, we also compute additional statistics and\r\nplot precision-recall curves if an additional na_prob.json file is provided.\r\nThis file is expected to map question ID's to the model's predicted probability\r\nthat a question is unanswerable.\r\n\"\"\"\r\nimport argparse\r\nimport collections\r\nimport json\r\nimport numpy as np\r\nimport os\r\nimport re\r\nimport string\r\nimport sys\r\n\r\nclass EVAL_OPTS():\r\n def __init__(self, data_file, pred_file, out_file=\"\",\r\n na_prob_file=\"na_prob.json\", na_prob_thresh=1.0,\r\n out_image_dir=None, verbose=False):\r\n self.data_file = data_file\r\n self.pred_file = pred_file\r\n self.out_file = out_file\r\n self.na_prob_file = na_prob_file\r\n self.na_prob_thresh = na_prob_thresh\r\n self.out_image_dir = out_image_dir\r\n self.verbose = verbose\r\n\r\nOPTS = None\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')\r\n parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')\r\n parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')\r\n parser.add_argument('--out-file', '-o', metavar='eval.json',\r\n help='Write accuracy metrics to file (default is stdout).')\r\n parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',\r\n help='Model estimates of probability of no answer.')\r\n parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,\r\n help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')\r\n parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,\r\n help='Save precision-recall curves to directory.')\r\n parser.add_argument('--verbose', '-v', action='store_true')\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n sys.exit(1)\r\n return parser.parse_args()\r\n\r\ndef make_qid_to_has_ans(dataset):\r\n qid_to_has_ans = {}\r\n for article in dataset:\r\n for p in article['paragraphs']:\r\n for qa in p['qas']:\r\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\r\n return qid_to_has_ans\r\n\r\ndef normalize_answer(s):\r\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\r\n def remove_articles(text):\r\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\r\n return re.sub(regex, ' ', text)\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n def lower(text):\r\n return text.lower()\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))\r\n\r\ndef get_tokens(s):\r\n if not s: return []\r\n return normalize_answer(s).split()\r\n\r\ndef compute_exact(a_gold, a_pred):\r\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\r\n\r\ndef compute_f1(a_gold, a_pred):\r\n gold_toks = get_tokens(a_gold)\r\n pred_toks = get_tokens(a_pred)\r\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\r\n num_same = sum(common.values())\r\n if len(gold_toks) == 0 or len(pred_toks) == 0:\r\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\r\n return int(gold_toks == pred_toks)\r\n if num_same == 0:\r\n return 0\r\n precision = 1.0 * num_same / len(pred_toks)\r\n recall = 1.0 * num_same / len(gold_toks)\r\n f1 = (2 * precision * recall) / (precision + recall)\r\n return 
f1\r\n\r\ndef get_raw_scores(dataset, preds):\r\n exact_scores = {}\r\n f1_scores = {}\r\n for article in dataset:\r\n for p in article['paragraphs']:\r\n for qa in p['qas']:\r\n qid = qa['id']\r\n gold_answers = [a['text'] for a in qa['answers']\r\n if normalize_answer(a['text'])]\r\n if not gold_answers:\r\n # For unanswerable questions, only correct answer is empty string\r\n gold_answers = ['']\r\n if qid not in preds:\r\n print('Missing prediction for %s' % qid)\r\n continue\r\n a_pred = preds[qid]\r\n # Take max over all gold answers\r\n exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\r\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\r\n return exact_scores, f1_scores\r\n\r\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\r\n new_scores = {}\r\n for qid, s in scores.items():\r\n pred_na = na_probs[qid] > na_prob_thresh\r\n if pred_na:\r\n new_scores[qid] = float(not qid_to_has_ans[qid])\r\n else:\r\n new_scores[qid] = s\r\n return new_scores\r\n\r\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\r\n if not qid_list:\r\n total = len(exact_scores)\r\n return collections.OrderedDict([\r\n ('exact', 100.0 * sum(exact_scores.values()) / total),\r\n ('f1', 100.0 * sum(f1_scores.values()) / total),\r\n ('total', total),\r\n ])\r\n else:\r\n total = len(qid_list)\r\n return collections.OrderedDict([\r\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\r\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\r\n ('total', total),\r\n ])\r\n\r\ndef merge_eval(main_eval, new_eval, prefix):\r\n for k in new_eval:\r\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\r\n\r\ndef plot_pr_curve(precisions, recalls, out_image, title):\r\n plt.step(recalls, precisions, color='b', alpha=0.2, where='post')\r\n plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')\r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.xlim([0.0, 1.05])\r\n plt.ylim([0.0, 1.05])\r\n plt.title(title)\r\n plt.savefig(out_image)\r\n plt.clf()\r\n\r\ndef make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=None, title=None):\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n true_pos = 0.0\r\n cur_p = 1.0\r\n cur_r = 0.0\r\n precisions = [1.0]\r\n recalls = [0.0]\r\n avg_prec = 0.0\r\n for i, qid in enumerate(qid_list):\r\n if qid_to_has_ans[qid]:\r\n true_pos += scores[qid]\r\n cur_p = true_pos / float(i+1)\r\n cur_r = true_pos / float(num_true_pos)\r\n if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:\r\n # i.e., if we can put a threshold after this point\r\n avg_prec += cur_p * (cur_r - recalls[-1])\r\n precisions.append(cur_p)\r\n recalls.append(cur_r)\r\n if out_image:\r\n plot_pr_curve(precisions, recalls, out_image, title)\r\n return {'ap': 100.0 * avg_prec}\r\n\r\ndef run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, \r\n qid_to_has_ans, out_image_dir):\r\n if out_image_dir and not os.path.exists(out_image_dir):\r\n os.makedirs(out_image_dir)\r\n num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)\r\n if num_true_pos == 0:\r\n return\r\n pr_exact = make_precision_recall_eval(\r\n exact_raw, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 'pr_exact.png'),\r\n title='Precision-Recall curve for Exact Match score')\r\n pr_f1 = make_precision_recall_eval(\r\n f1_raw, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 
'pr_f1.png'),\r\n title='Precision-Recall curve for F1 score')\r\n oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}\r\n pr_oracle = make_precision_recall_eval(\r\n oracle_scores, na_probs, num_true_pos, qid_to_has_ans,\r\n out_image=os.path.join(out_image_dir, 'pr_oracle.png'),\r\n title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')\r\n merge_eval(main_eval, pr_exact, 'pr_exact')\r\n merge_eval(main_eval, pr_f1, 'pr_f1')\r\n merge_eval(main_eval, pr_oracle, 'pr_oracle')\r\n\r\ndef histogram_na_prob(na_probs, qid_list, image_dir, name):\r\n if not qid_list:\r\n return\r\n x = [na_probs[k] for k in qid_list]\r\n weights = np.ones_like(x) / float(len(x))\r\n plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))\r\n plt.xlabel('Model probability of no-answer')\r\n plt.ylabel('Proportion of dataset')\r\n plt.title('Histogram of no-answer probability: %s' % name)\r\n plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))\r\n plt.clf()\r\n\r\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\r\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\r\n cur_score = num_no_ans\r\n best_score = cur_score\r\n best_thresh = 0.0\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n for i, qid in enumerate(qid_list):\r\n if qid not in scores: continue\r\n if qid_to_has_ans[qid]:\r\n diff = scores[qid]\r\n else:\r\n if preds[qid]:\r\n diff = -1\r\n else:\r\n diff = 0\r\n cur_score += diff\r\n if cur_score > best_score:\r\n best_score = cur_score\r\n best_thresh = na_probs[qid]\r\n return 100.0 * best_score / len(scores), best_thresh\r\n\r\ndef find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):\r\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\r\n cur_score = num_no_ans\r\n best_score = cur_score\r\n best_thresh = 0.0\r\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\r\n for i, qid in enumerate(qid_list):\r\n if qid not in scores: continue\r\n if qid_to_has_ans[qid]:\r\n diff = scores[qid]\r\n else:\r\n if preds[qid]:\r\n diff = -1\r\n else:\r\n diff = 0\r\n cur_score += diff\r\n if cur_score > best_score:\r\n best_score = cur_score\r\n best_thresh = na_probs[qid]\r\n\r\n has_ans_score, has_ans_cnt = 0, 0\r\n for qid in qid_list:\r\n if not qid_to_has_ans[qid]: continue\r\n has_ans_cnt += 1\r\n\r\n if qid not in scores: continue\r\n has_ans_score += scores[qid]\r\n\r\n return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt\r\n\r\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\r\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\r\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\r\n main_eval['best_exact'] = best_exact\r\n main_eval['best_exact_thresh'] = exact_thresh\r\n main_eval['best_f1'] = best_f1\r\n main_eval['best_f1_thresh'] = f1_thresh\r\n\r\ndef find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\r\n best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)\r\n best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)\r\n main_eval['best_exact'] = best_exact\r\n main_eval['best_exact_thresh'] = exact_thresh\r\n main_eval['best_f1'] = best_f1\r\n main_eval['best_f1_thresh'] = f1_thresh\r\n main_eval['has_ans_exact'] = has_ans_exact\r\n main_eval['has_ans_f1'] = has_ans_f1\r\n\r\ndef main(OPTS):\r\n with 
open(OPTS.data_file) as f:\r\n dataset_json = json.load(f)\r\n dataset = dataset_json['data']\r\n with open(OPTS.pred_file) as f:\r\n preds = json.load(f)\r\n if OPTS.na_prob_file:\r\n with open(OPTS.na_prob_file) as f:\r\n na_probs = json.load(f)\r\n else:\r\n na_probs = {k: 0.0 for k in preds}\r\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False\r\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\r\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\r\n exact_raw, f1_raw = get_raw_scores(dataset, preds)\r\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\r\n OPTS.na_prob_thresh)\r\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\r\n OPTS.na_prob_thresh)\r\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\r\n if has_ans_qids:\r\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\r\n merge_eval(out_eval, has_ans_eval, 'HasAns')\r\n if no_ans_qids:\r\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\r\n merge_eval(out_eval, no_ans_eval, 'NoAns')\r\n if OPTS.na_prob_file:\r\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)\r\n if OPTS.na_prob_file and OPTS.out_image_dir:\r\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, \r\n qid_to_has_ans, OPTS.out_image_dir)\r\n histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')\r\n histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')\r\n if OPTS.out_file:\r\n with open(OPTS.out_file, 'w') as f:\r\n json.dump(out_eval, f)\r\n else:\r\n print(json.dumps(out_eval, indent=2))\r\n return out_eval\r\n \r\nif __name__ == '__main__':\r\n OPTS = parse_args()\r\n if OPTS.out_image_dir:\r\n import matplotlib\r\n matplotlib.use('Agg')\r\n import matplotlib.pyplot as plt \r\n main(OPTS)"
] |
[
[
"numpy.ones_like",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.step",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
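As a quick illustration of the token-level metrics defined in the evaluation script above (`normalize_answer`, `compute_exact`, `compute_f1`), here is a hedged doctest-style sketch; the answer strings are invented:

>>> compute_exact('The Cat sat.', 'cat sat')   # case, punctuation and articles are normalized away
1
>>> # gold tokens {'cat', 'sat'} vs. predicted {'cat', 'sat', 'down'}:
>>> # precision = 2/3, recall = 2/2, so F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8
>>> round(compute_f1('the cat sat', 'cat sat down'), 2)
0.8

If both gold and prediction are empty, `compute_f1` returns 1 and otherwise 0, which is how `get_raw_scores` scores unanswerable questions (it substitutes an empty-string gold answer).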
leyiweb/Deep-SAD-PyTorch
|
[
"305667c84b92167792816794f84b41273a7b41c0",
"305667c84b92167792816794f84b41273a7b41c0"
] |
[
"src/networks/layers/standard.py",
"src/networks/vae.py"
] |
[
"import torch\n\nfrom torch.nn import Module\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\n\n\n# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch\nclass Standardize(Module):\n \"\"\"\n Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes\n (x - μ) / σ where '/' is applied element-wise.\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n bias: If set to False, the layer will not learn a translation parameter μ.\n Default: ``True``\n\n Attributes:\n mu: the learnable translation parameter μ.\n std: the learnable scale parameter σ.\n \"\"\"\n __constants__ = ['mu']\n\n def __init__(self, in_features, bias=True, eps=1e-6):\n super(Standardize, self).__init__()\n self.in_features = in_features\n self.out_features = in_features\n self.eps = eps\n self.std = Parameter(torch.Tensor(in_features))\n if bias:\n self.mu = Parameter(torch.Tensor(in_features))\n else:\n self.register_parameter('mu', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.constant_(self.std, 1)\n if self.mu is not None:\n init.constant_(self.mu, 0)\n\n def forward(self, x):\n if self.mu is not None:\n x -= self.mu\n x = torch.div(x, self.std + self.eps)\n return x\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.mu is not None\n )\n",
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nfrom .layers.stochastic import GaussianSample\nfrom .inference.distributions import log_standard_gaussian, log_gaussian\n\n\n# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch\nclass Encoder(nn.Module):\n \"\"\"\n Encoder, i.e. the inference network.\n\n Attempts to infer the latent probability distribution p(z|x) from the data x by fitting a\n variational distribution q_φ(z|x). Returns the two parameters of the distribution (µ, log σ²).\n\n :param dims: dimensions of the network given by [input_dim, [hidden_dims], latent_dim].\n \"\"\"\n\n def __init__(self, dims, sample_layer=GaussianSample):\n super(Encoder, self).__init__()\n\n [x_dim, h_dim, z_dim] = dims\n neurons = [x_dim, *h_dim]\n linear_layers = [nn.Linear(neurons[i-1], neurons[i]) for i in range(1, len(neurons))]\n\n self.hidden = nn.ModuleList(linear_layers)\n self.sample = sample_layer(h_dim[-1], z_dim)\n\n def forward(self, x):\n for layer in self.hidden:\n x = F.relu(layer(x))\n return self.sample(x)\n\n\nclass Decoder(nn.Module):\n \"\"\"\n Decoder, i.e. the generative network.\n\n Generates samples from an approximation p_θ(x|z) of the original distribution p(x)\n by transforming a latent representation z.\n\n :param dims: dimensions of the network given by [latent_dim, [hidden_dims], input_dim].\n \"\"\"\n\n def __init__(self, dims):\n super(Decoder, self).__init__()\n\n [z_dim, h_dim, x_dim] = dims\n neurons = [z_dim, *h_dim]\n linear_layers = [nn.Linear(neurons[i-1], neurons[i]) for i in range(1, len(neurons))]\n\n self.hidden = nn.ModuleList(linear_layers)\n self.reconstruction = nn.Linear(h_dim[-1], x_dim)\n self.output_activation = nn.Sigmoid()\n\n def forward(self, x):\n for layer in self.hidden:\n x = F.relu(layer(x))\n return self.output_activation(self.reconstruction(x))\n\n\nclass VariationalAutoencoder(nn.Module):\n \"\"\"\n Variational Autoencoder (VAE) (Kingma and Welling, 2013) model consisting of an encoder-decoder pair for which\n a variational distribution is fitted to the encoder.\n Also known as the M1 model in (Kingma et al., 2014)\n\n :param dims: dimensions of the networks given by [input_dim, latent_dim, [hidden_dims]]. 
Encoder and decoder\n are build symmetrically.\n \"\"\"\n\n def __init__(self, dims):\n super(VariationalAutoencoder, self).__init__()\n\n [x_dim, z_dim, h_dim] = dims\n self.z_dim = z_dim\n self.flow = None\n\n self.encoder = Encoder([x_dim, h_dim, z_dim])\n self.decoder = Decoder([z_dim, list(reversed(h_dim)), x_dim])\n self.kl_divergence = 0\n\n # Init linear layers\n for m in self.modules():\n if isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def _kld(self, z, q_param, p_param=None):\n \"\"\"\n Computes the KL-divergence of some latent variable z.\n\n KL(q||p) = - ∫ q(z) log [ p(z) / q(z) ] = - E_q[ log p(z) - log q(z) ]\n\n :param z: sample from q-distribuion\n :param q_param: (mu, log_var) of the q-distribution\n :param p_param: (mu, log_var) of the p-distribution\n :return: KL(q||p)\n \"\"\"\n (mu, log_var) = q_param\n\n if self.flow is not None:\n f_z, log_det_z = self.flow(z)\n qz = log_gaussian(z, mu, log_var) - sum(log_det_z)\n z = f_z\n else:\n qz = log_gaussian(z, mu, log_var)\n\n if p_param is None:\n pz = log_standard_gaussian(z)\n else:\n (mu, log_var) = p_param\n pz = log_gaussian(z, mu, log_var)\n\n kl = qz - pz\n\n return kl\n\n def add_flow(self, flow):\n self.flow = flow\n\n def forward(self, x, y=None):\n \"\"\"\n Runs a forward pass on a data point through the VAE model to provide its reconstruction and the parameters of\n the variational approximate distribution q.\n\n :param x: input data\n :return: reconstructed input\n \"\"\"\n z, q_mu, q_log_var = self.encoder(x)\n self.kl_divergence = self._kld(z, (q_mu, q_log_var))\n rec = self.decoder(z)\n\n return rec\n\n def sample(self, z):\n \"\"\"\n Given z ~ N(0, I) generates a sample from the learned distribution based on p_θ(x|z).\n\n :param z: (torch.autograd.Variable) latent normal variable\n :return: (torch.autograd.Variable) generated sample\n \"\"\"\n return self.decoder(z)\n"
] |
[
[
"torch.nn.init.constant_",
"torch.div",
"torch.Tensor"
],
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.init.xavier_normal_",
"torch.nn.Sigmoid"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
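A minimal usage sketch for the `VariationalAutoencoder` in the code cell above. The dimensions and batch are made up, and the class still needs the `GaussianSample` layer and log-density helpers imported in that file, so this only illustrates the documented `dims = [input_dim, latent_dim, [hidden_dims]]` convention:

>>> import torch
>>> vae = VariationalAutoencoder([784, 32, [256, 128]])   # encoder 784->256->128->32, decoder mirrored
>>> x = torch.rand(16, 784)                                # dummy mini-batch
>>> rec = vae(x)                                           # sigmoid reconstruction, shape (16, 784)
>>> kl = vae.kl_divergence                                 # KL(q(z|x) || p(z)) stored during forward()

The KL term is recomputed on every forward pass, so a training loop would typically minimize a reconstruction loss on `rec` plus the (summed) `vae.kl_divergence` term, i.e. the usual ELBO.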
BlueAmulet/BasicSR
|
[
"7040913d8659a05af4c2428feb71c260efbf1e9c"
] |
[
"codes/models/modules/loss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport math\nimport numbers\nfrom torch.nn import functional as F\nimport numpy as np\n\ndef LoG(imgHF): #Laplacian of Gaussian\n # The LoG operator calculates the second spatial derivative of an image. \n # This means that in areas where the image has a constant intensity (i.e. \n # where the intensity gradient is zero), the LoG response will be zero. \n # In the vicinity of a change in intensity, however, the LoG response \n # will be positive on the darker side, and negative on the lighter side. \n # This means that at a reasonably sharp edge between two regions of \n # uniform but different intensities, the LoG response will be:\n # - zero at a long distance from the edge,\n # - positive just to one side of the edge,\n # - negative just to the other side of the edge,\n # - zero at some point in between, on the edge itself.\n # The enhancement sharpens the edges but also increases noise. If the \n # original image is filtered with a simple Laplacian (a LoG filter \n # with a very narrow Gaussian), the resulting output is rather noisy.\n # Combining this output with the original will give a noisy result. \n # On the other hand, using a larger σ for the Gaussian will reduce \n # the noise, but the sharpening effect will be reduced. \n\n # The 2-D LoG can be approximated by a 5 by 5 convolution kernel such as:\n weight = [\n [0, 0, 1, 0, 0],\n [0, 1, 2, 1, 0],\n [1, 2, -16, 2, 1],\n [0, 1, 2, 1, 0],\n [0, 0, 1, 0, 0]\n ]\n weight = np.array(weight)\n weight_np = np.zeros((1, 1, 5, 5))\n \n \"\"\"\n # 3x3 Laplacian kernels (without Gaussian smoothing)\n # These kernels are approximating a second derivative measurement on \n # the image, they are very sensitive to noise. To counter this, the \n # image is often Gaussian smoothed before applying the Laplacian filter.\n # Note that the output can contain negative and non-integer values, \n # so for display purposes the image has been normalized.\n ## 3x3 v1:\n weight = [\n [0, -1, 0],\n [-1, 4, -1],\n [0, -1, 0]\n ]\n \n ## 3x3 v2:\n # weight = [\n # [-1, -1, -1],\n # [-1, 8, -1],\n # [-1, -1, -1]\n # ]\n\n weight = np.array(weight)\n weight_np = np.zeros((1, 1, 3, 3))\n \"\"\"\n \n weight_np[0, 0, :, :] = weight\n weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)\n weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)\n\n weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to('cuda:0')\n \n return nn.functional.conv2d(imgHF, weight, padding=1)\n\nclass GaussianSmoothing(nn.Module):\n def __init__(self, channels, kernel_size=15, sigma=3, dim=2):\n super(GaussianSmoothing, self).__init__()\n if isinstance(kernel_size, numbers.Number):\n kernel_size = [kernel_size] * dim\n if isinstance(sigma, numbers.Number):\n sigma = [sigma] * dim\n\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \\\n torch.exp(-((mgrid - mean) / std) ** 2 / 2)\n\n kernel = kernel / torch.sum(kernel)\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n\n if dim == 1:\n self.conv = F.conv1d\n elif dim == 2:\n self.conv = F.conv2d\n elif dim == 3:\n self.conv = F.conv3d\n else:\n raise RuntimeError(\n 'Only 1, 2 and 3 dimensions are supported. 
Received {}.'.format(dim)\n )\n\n def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)\n\nclass CharbonnierLoss(nn.Module):\n \"\"\"Charbonnier Loss (L1)\"\"\"\n\n def __init__(self, eps=1e-6):\n super(CharbonnierLoss, self).__init__()\n self.eps = eps\n\n def forward(self, x, y):\n b, c, h, w = y.size()\n diff = x - y\n loss = torch.sum(torch.sqrt(diff * diff + self.eps))\n #loss = torch.sum(torch.sqrt((x - y).pow(2) + self.eps **2)) / x.shape[0]\n return loss/(c*b*h*w)\n \n# Define GAN loss: [vanilla | lsgan | wgan-gp]\n# https://tuatini.me/creating-and-shipping-deep-learning-models-into-production/\nclass GANLoss(nn.Module):\n def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):\n super(GANLoss, self).__init__()\n self.gan_type = gan_type.lower()\n self.real_label_val = real_label_val\n self.fake_label_val = fake_label_val\n\n if self.gan_type == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif self.gan_type == 'lsgan':\n self.loss = nn.MSELoss()\n elif self.gan_type == 'srpgan':\n self.loss = nn.BCELoss() #0.001 * F.binary_cross_entropy(d_sr_out, torch.ones_like(d_sr_out))\n elif self.gan_type == 'wgan-gp':\n\n def wgan_loss(input, target):\n # target is boolean\n return -1 * input.mean() if target else input.mean()\n\n self.loss = wgan_loss\n else:\n raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))\n\n def get_target_label(self, input, target_is_real):\n if self.gan_type == 'wgan-gp':\n return target_is_real\n if target_is_real:\n return torch.empty_like(input).fill_(self.real_label_val) #torch.ones_like(d_sr_out)\n else:\n return torch.empty_like(input).fill_(self.fake_label_val) #torch.zeros_like(d_sr_out)\n\n def forward(self, input, target_is_real):\n target_label = self.get_target_label(input, target_is_real)\n loss = self.loss(input, target_label)\n return loss\n\n\nclass GradientPenaltyLoss(nn.Module):\n def __init__(self, device=torch.device('cpu')):\n super(GradientPenaltyLoss, self).__init__()\n self.register_buffer('grad_outputs', torch.Tensor())\n self.grad_outputs = self.grad_outputs.to(device)\n\n def get_grad_outputs(self, input):\n if self.grad_outputs.size() != input.size():\n self.grad_outputs.resize_(input.size()).fill_(1.0)\n return self.grad_outputs\n\n def forward(self, interp, interp_crit):\n grad_outputs = self.get_grad_outputs(interp_crit)\n grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \\\n grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]\n grad_interp = grad_interp.view(grad_interp.size(0), -1)\n grad_interp_norm = grad_interp.norm(2, dim=1)\n\n loss = ((grad_interp_norm - 1)**2).mean()\n return loss\n\n\nclass HFENLoss(nn.Module): # Edge loss with pre_smooth\n # In order to further penalize the diferences in fine details, such as edges, \n # a gradient-domain L1 loss can be used, where each gradient ∇(·) is computed \n # using a High Frequency Error Norm (HFEN). The metric uses a Laplacian of\n # Gaussian kernel for edge-detection. The Laplacian works to detect\n # edges, but is sensitive to noise, so the image can be pre-smoothed with a\n # Gaussian filter first to make edge-detection work better. 
The recommended \n # parameter of σ = 1.5 for Gaussian kernel size can be used.\n def __init__(self, loss_f='L1', device='cuda:0', pre_smooth=True, relative=False):\n super(HFENLoss, self).__init__()\n self.device = device\n self.loss_f = loss_f #loss function\n self.pre_smooth = pre_smooth\n self.relative = relative\n self.laplacian = False\n\n if loss_f=='l2':\n self.criterion = nn.MSELoss(reduction='sum').to(device)\n elif loss_f=='elastic':\n self.criterion = ElasticLoss(reduction='sum').to(device)\n elif loss_f=='cb':\n self.criterion = CharbonnierLoss().to(device)\n else: #if loss_f=='l1':\n self.criterion = nn.L1Loss(reduction='sum').to(device)\n\n def forward(self, input, target, eps=0.01):\n c = input.shape[1]\n\n # Note that, since the range of color values can be significantly\n # large, we apply a logarithmic function to the ground truth image to\n # compress its range before computing the loss, i.e., c = log(1 + c˜),\n # where ˜c is the ground truth image in the linear domain.\n # Note: This may not hold true if image range is already [0,1] or [-1,1]\n # input = torch.log(1 + input) #(eps=1e-7)\n \n if self.pre_smooth:\n # As Laplace operator may detect edges as well as noise (isolated, out-of-range), \n # it may be desirable to smooth the image first by a convolution with a Gaussian \n # kernel of width sigma. This will add an additional Gaussian smoothing before LoG\n # to reduce noise and only focus on Edge loss.\n # Configure Gaussian kernel\n smoothing = GaussianSmoothing(c, 11, 1.5) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)\n smoothing = smoothing.to(self.device) #.to('cuda:0')\n # Pad input and target\n input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')\n target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')\n # Apply Gaussian kernel \n input_smooth = smoothing(input_smooth)\n target_smooth = smoothing(target_smooth)\n else:\n if self.relative: \n if self.laplacian:\n input_smooth = input\n target_smooth = target\n else:\n input_smooth = nn.functional.pad(input, (1, 1, 1, 1), mode='reflect')\n target_smooth = nn.functional.pad(target, (1, 1, 1, 1), mode='reflect')\n else:\n input_smooth = input\n target_smooth = target\n \n # If using Gaussian+laplacian instead of LoG\n # Needs more testing, look at SSIM that also uses gaussian convolution\n if self.laplacian:\n #Gaussian, needs to be applied for \"Laplacian of Gauss\" (LoG)\n if self.pre_smooth:\n pad_size = 11 #5,7,9,11\n LoG_kernel = 17 #5,9,13,17\n else: \n pad_size = 7 #>= 2\n LoG_kernel = (2*pad_size)+1 #LoG-> pad: 5 -> 2, 15 -> 7, etc\n gaussian = GaussianSmoothing(c, LoG_kernel, 1.5).to(self.device) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)\n input_smooth = nn.functional.pad(input_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')\n target_smooth = nn.functional.pad(target_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')\n # Apply Gaussian kernel \n input_smooth = gaussian(input_smooth)\n target_smooth = gaussian(target_smooth)\n \n \"\"\"\n if self.loss_f == 'L2':\n x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))\n elif self.loss_f == 'elastic':\n x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))\n else: #loss_f == 'L1':\n x = torch.abs(LoG(input_smooth-target_smooth)).sum()\n \"\"\"\n \n if self.relative:\n # Comparing to the original HFEN, introducing the division by |c|+epsilon \n # better models the human vision system’s sensitivity to variations\n # in the dark areas. 
(where epsilon = 0.01, to prevent values of 0 in the\n # denominator)\n # x = self.criterion(LoG(input_smooth)/(target+eps),LoG(target_smooth)/(target+eps))\n x = self.criterion(LoG(input_smooth)/(target+eps).norm(),LoG(target_smooth)/(target+eps).norm())\n # x = self.criterion(lap.Laplacian(LoG_kernel)(input_smooth)/(target+eps),lap.Laplacian(LoG_kernel)(target_smooth)/(target+eps))\n \n else:\n # To calculate the HFEN, a 5x5 rotationally symmetric Laplacian of Gaussian \n # (LoG) filter is used to capture the edges in the absolute reconstruction error \n # image and the HFEN is calculated as the Frobenius norm of the error edge image.\n # x = self.criterion(LoG(input_smooth),LoG(target_smooth)) # No normalization (HFEN needs normalization, can use a case later)\n x = self.criterion(LoG(input_smooth),LoG(target_smooth))/torch.sum(torch.pow(LoG(target_smooth), 2))\n # x = self.criterion(lap.Laplacian(LoG_kernel)(input_smooth),lap.Laplacian(LoG_kernel)(target_smooth))/torch.sum(torch.pow(lap.Laplacian(LoG_kernel)(target_smooth), 2))\n \n # if self.normalized:\n # if self.loss_f == 'l2':\n # x = x / torch.sum(torch.pow(LoG(target), 2))\n ## x = x / target.norm()\n # else: #elif self.loss_f == 'l1':\n # x = x / torch.sum(torch.abs(LoG(target)))\n \n return x\n\nclass TVLoss(nn.Module):\n def __init__(self, tvloss_weight=1, p=1):\n super(TVLoss, self).__init__()\n self.tvloss_weight = tvloss_weight\n assert p in [1, 2]\n self.p = p\n\n def forward(self, x): \n batch_size = x.size()[0]\n img_shape = x.shape\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n \n if len(img_shape) == 3 or len(img_shape) == 4:\n if self.p == 1:\n # loss = torch.sum(torch.abs(x[:,:,:-1,:] - x[:,:,1:,:])) + torch.sum(torch.abs(x[:,:,:,:-1] - x[:,:,:,1:]))\n # return self.tvloss_weight * 2 * loss/((count_h/2+count_w/2)*batch_size) #/ x.size(0) / (x.size(2)-1) / (x.size(3)-1)\n \n # Alternative calculation, same results:\n #h_tv = torch.abs((x[:, :, 1:, :] - x[:, :, :h_x - 1, :])).sum()\n #w_tv = torch.abs((x[:, :, :, 1:] - x[:, :, :, :w_x - 1])).sum()\n #return self.tvloss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size # For use with the alternative calculation\n \n # Alternative calculation 2: https://kornia.readthedocs.io/en/latest/_modules/kornia/losses/total_variation.html#total_variation\n pixel_dif1 = x[..., 1:, :] - x[..., :-1, :]\n pixel_dif2 = x[..., :, 1:] - x[..., :, :-1]\n reduce_axes = (-3, -2, -1)\n loss = self.tvloss_weight*(pixel_dif1.abs().sum(dim=reduce_axes) + pixel_dif2.abs().sum(dim=reduce_axes)) # Calculates the TV loss for each image in the batch\n loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n return loss\n \n # loss = self.tvloss_weight*((x[:,:,1:,:] - x[:,:,:-1,:]).abs().sum(dim=(-3, -2, -1)) + (x[:,:,:,1:] - x[:,:,:,:-1]).abs().sum(dim=(-3, -2, -1)))\n # loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n # return loss\n \n else:\n #loss = torch.sum(torch.sqrt((x[:,:,:-1,:] - x[:,:,1:,:])**2)) + torch.sum(torch.sqrt((x[:,:,:,:-1] - x[:,:,:,1:])**2)) # Doesn't work, magnitude is too large\n #return self.tvloss_weight * 2 * loss/((count_h/2+count_w/2)*batch_size) #/ x.size(0) / (x.size(2)-1) / (x.size(3)-1) #For use with the alternative calculation that doesn't work yet\n\n # Alternative calculation: # This one works\n # h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n # w_tv = torch.pow((x[:, :, :, 1:] - x[:, 
:, :, :w_x - 1]), 2).sum()\n # return self.tvloss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n \n # Alternative calculation 2: https://kornia.readthedocs.io/en/latest/_modules/kornia/losses/total_variation.html#total_variation\n pixel_dif1 = x[..., 1:, :] - x[..., :-1, :]\n pixel_dif2 = x[..., :, 1:] - x[..., :, :-1]\n reduce_axes = (-3, -2, -1)\n loss = self.tvloss_weight*(torch.pow(pixel_dif1,2).sum(dim=reduce_axes) + torch.pow(pixel_dif2,2).sum(dim=reduce_axes)) # Calculates the TV loss for each image in the batch\n loss = loss.sum() / batch_size # averages the TV loss all the images in the batch \n return loss\n \n else:\n raise ValueError(\"Expected input tensor to be of ndim 3 or 4, but got \" + str(len(img_shape)))\n\n #return self.tvloss_weight * 2 *loss\n \n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n \nclass ElasticLoss(nn.Module):\n def __init__(self, a=0.2, reduction='mean'): #a=0.5 default\n super(ElasticLoss, self).__init__()\n self.alpha = torch.FloatTensor([a, 1 - a]).to('cuda:0')\n self.reduction = reduction\n\n def forward(self, input, target):\n if not isinstance(input, tuple):\n input = (input,)\n\n for i in range(len(input)):\n l2 = nn.functional.mse_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[0], reduction=self.reduction)\n l1 = nn.functional.l1_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[1], reduction=self.reduction)\n loss = l1 + l2\n\n return loss\n\nclass RelativeL1(nn.Module):\n # Comparing to the regular L1, introducing the division by |c|+epsilon \n # better models the human vision system’s sensitivity to variations\n # in the dark areas. (where epsilon = 0.01, to prevent values of 0 in the\n # denominator)\n def __init__(self):\n super().__init__()\n self.criterion = torch.nn.L1Loss()\n\n def forward(self, input, target):\n base = target +.01\n\n return self.criterion(input/base, target/base)\n\n\n# https://github.com/dmarnerides/hdr-expandnet/blob/master/train.py\n# Can be used to replace L1 pixel loss, but includes a cosine similarity term \n# to ensure color correctness of the RGB vectors of each pixel.\n# lambda is a constant factor that adjusts the contribution of the cosine similarity term\n# It provides improved color stability, especially for low luminance values, which\n# are frequent in HDR images, since slight variations in any of theRGB components of these \n# low values do not contribute much totheL1loss, but they may however cause noticeable \n# color shifts. 
More in the paper: https://arxiv.org/pdf/1803.02266.pdf\nclass L1CosineSim(nn.Module):\n def __init__(self, loss_lambda=5):\n super(L1CosineSim, self).__init__()\n self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20)\n self.l1_loss = nn.L1Loss()\n self.loss_lambda = loss_lambda\n\n def forward(self, x, y):\n cosine_term = (1 - self.similarity(x, y)).mean()\n return self.l1_loss(x, y) + self.loss_lambda * cosine_term\n\n\n\"\"\" \nclass LossCombo(nn.Module):\n def __init__(self, monitor_writer, *losses):\n super().__init__()\n self.monitor_writer = monitor_writer\n pass\n\n self.losses = []\n self.losses_names = []\n self.factors = []\n\n for name, loss, factor in losses:\n self.losses.append(loss)\n self.losses_names.append(name)\n self.factors.append(factor)\n\n self.add_module(name, loss)\n\n def multi_gpu(self):\n pass\n #self.losses = [nn.DataParallel(x) for x in self.losses]\n\n def forward(self, input, target, additional_losses):\n loss_results = []\n for idx, loss in enumerate(self.losses):\n loss_results.append(loss(input, target))\n\n for name, loss_result, factor in zip(self.losses_names, loss_results, self.factors):\n #print(loss_result)\n self.monitor_writer.add_scalar(name, loss_result*factor)\n\n for name, loss_result, factor in additional_losses:\n loss_result = loss_result.mean()\n #print(loss_result)\n self.monitor_writer.add_scalar(name, loss_result*factor)\n\n\n total_loss = sum([factor*loss_result for factor, loss_result in zip(self.factors, loss_results)]) + sum([factor*loss_result.mean() for name, loss_result, factor in additional_losses])\n self.monitor_writer.add_scalar(\"total_loss\", total_loss)\n\n return total_loss\n\"\"\""
] |
[
[
"torch.sum",
"torch.nn.BCEWithLogitsLoss",
"torch.FloatTensor",
"torch.device",
"torch.nn.L1Loss",
"torch.pow",
"torch.sqrt",
"torch.from_numpy",
"torch.nn.CosineSimilarity",
"torch.arange",
"numpy.repeat",
"numpy.zeros",
"torch.autograd.grad",
"torch.nn.functional.pad",
"torch.empty_like",
"torch.nn.functional.conv2d",
"torch.nn.BCELoss",
"torch.exp",
"numpy.array",
"torch.Tensor",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
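The loss definitions above are self-contained `nn.Module`s, so a hedged sketch of wiring two of them up looks like this (tensors and the regularization weight are arbitrary):

>>> import torch
>>> sr = torch.rand(4, 3, 64, 64)       # fake super-resolved batch
>>> hr = torch.rand(4, 3, 64, 64)       # fake ground truth
>>> pix_crit = CharbonnierLoss(eps=1e-6)
>>> tv_crit = TVLoss(tvloss_weight=1, p=1)
>>> loss = pix_crit(sr, hr) + 2e-8 * tv_crit(sr)   # pixel term + TV regularizer

Note the hard-coded `'cuda:0'` in `LoG()` and `ElasticLoss` (and the default device of `HFENLoss`) means those paths will fail on CPU-only machines, which is worth keeping in mind when reusing this file.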
zhafen/galaxy-dive
|
[
"e1127da25d10f699b3ada01b1b4635255f4f3917",
"e1127da25d10f699b3ada01b1b4635255f4f3917"
] |
[
"galaxy_dive/trends/data_products.py",
"galaxy_dive/tests/test_read_data/test_metafile.py"
] |
[
"#!/usr/bin/env python\n'''Compilation of functions for interfacing with miscellanious data products.\n\n@author: Zach Hafen\n@contact: [email protected]\n@status: Development\n'''\n\nimport copy\nimport numpy as np\nimport os\nimport pandas as pd\n\n########################################################################\n\ndef tidal_tensor_data_grudic(\n snum,\n ids = None,\n data_dir = '/work/03532/mgrudic/tidal_tensor/tidal_tensor_data',\n):\n '''Load data Mike Grudic processed that contains Tidal Tensor, velocity\n dispersion, and items used for calculating the aforementioned quantities.\n\n Args:\n snum (int): Snapshot to retrieve the data for.\n\n ids (array-like): IDs to retrieve. Defaults to all.\n\n data_dir (str): Path to directory containing data.\n\n Returns:\n pandas.DataFrame\n DataFrame containing quantities. When given an ID not in the data\n returns NaN values for that ID.\n '''\n\n def invalid_data_result():\n '''Results when the data is invalid in some form.'''\n base_arr = np.full( len( ids ), np.nan )\n standin_data = {}\n data_keys = [\n 'ID',\n 'Txx',\n 'Tyy',\n 'Tzz',\n 'Txy',\n 'Tyz',\n 'Tzx',\n 'sigma_v',\n 'r_search',\n 'cond_num',\n ]\n for key in data_keys:\n standin_data[key] = copy.deepcopy( base_arr )\n standin_data['ID'] = ids\n df = pd.DataFrame( standin_data )\n df = df.set_index( 'ID' )\n\n return df\n\n # Load the data\n filename = 'tidal_tensor_{}.npy'.format( snum )\n file_path = os.path.join( data_dir, filename )\n try:\n full_arr = np.load( file_path )\n except FileNotFoundError:\n return invalid_data_result()\n \n # Convert to a pandas data frame to get the selected IDs out.\n data = {\n 'ID': full_arr[:,0].astype( int ),\n 'Txx': full_arr[:,1],\n 'Tyy': full_arr[:,2],\n 'Tzz': full_arr[:,3],\n 'Txy': full_arr[:,4],\n 'Tyz': full_arr[:,5],\n 'Tzx': full_arr[:,6],\n 'sigma_v': full_arr[:,7],\n 'r_search': full_arr[:,8],\n 'cond_num': full_arr[:,9],\n }\n df = pd.DataFrame( data, )\n df = df.set_index( 'ID' )\n\n # Select on IDs\n if ids is not None:\n try:\n df = df.loc[ids]\n except KeyError:\n return invalid_data_result()\n\n return df\n",
"#!/usr/bin/env python\n'''Testing for read_metafile.py\n\n@author: Zach Hafen\n@contact: [email protected]\n@status: Development\n'''\n\nimport numpy as np\nimport numpy.testing as npt\nimport unittest\n\nimport galaxy_dive.read_data.metafile as read_metafile\n\nsdir = './tests/data/sdir'\nsdir2 = './tests/data/sdir2'\n\n########################################################################\n\nclass TestMetafileReader( unittest.TestCase ):\n\n def setUp( self ):\n\n self.metafile_reader = read_metafile.MetafileReader( sdir )\n\n ########################################################################\n\n def test_get_snapshot_times( self ):\n\n expected = 13.55350759 # Time in Gyr for snapshot 580\n\n self.metafile_reader.get_snapshot_times()\n\n actual = self.metafile_reader.snapshot_times['time[Gyr]'][580]\n\n npt.assert_allclose( expected, actual )\n\n ########################################################################\n\n def test_get_snapshot_times_old_filename( self ):\n\n expected = 0.0049998745585804194\n\n # Give it the right directory\n self.metafile_reader.sdir = sdir2\n\n self.metafile_reader.get_snapshot_times()\n\n actual = self.metafile_reader.snapshot_times['redshift'][439]\n\n npt.assert_allclose( expected, actual )\n\n ########################################################################\n\n def test_get_used_parameters( self ):\n\n OmegaBaryon_expected = 0.0455\n\n self.metafile_reader.get_used_parameters()\n\n OmegaBaryon_actual = float( self.metafile_reader.used_parameters['OmegaBaryon'] )\n\n npt.assert_allclose( OmegaBaryon_expected, OmegaBaryon_actual )\n"
] |
[
[
"numpy.load",
"pandas.DataFrame"
],
[
"numpy.testing.assert_allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kthyng/octant
|
[
"65591d87797fa74e0c092d5f50fb0cd703eb412e",
"65591d87797fa74e0c092d5f50fb0cd703eb412e"
] |
[
"octant/python-gsw/gsw/gibbs/practical_salinity.py",
"octant/python-gsw/gsw/gibbs/basic_thermodynamic_t.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom library import Hill_ratio_at_SP2\nfrom gsw.utilities import match_args_return\n\n__all__ = [\n 'SP_from_C',\n 'C_from_SP',\n 'SP_from_R',\n 'R_from_SP',\n 'SP_salinometer',\n 'SP_from_SK',\n 'SK_from_SP'\n ]\n\n# Constants:\na = (0.0080, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081)\n\nb = (0.0005, -0.0056, -0.0066, -0.0375, 0.0636, -0.0144)\n\nc = (0.6766097, 2.00564e-2, 1.104259e-4, -6.9698e-7, 1.0031e-9)\n\nd = (3.426e-2, 4.464e-4, 4.215e-1, -3.107e-3)\n\ne = (2.070e-5, -6.370e-10, 3.989e-15)\n\nP = (4.577801212923119e-3, 1.924049429136640e-1, 2.183871685127932e-5,\n -7.292156330457999e-3, 1.568129536470258e-4, -1.478995271680869e-6,\n 9.086442524716395e-4, -1.949560839540487e-5, -3.223058111118377e-6,\n 1.175871639741131e-7, -7.522895856600089e-5, -2.254458513439107e-6,\n 6.179992190192848e-7, 1.005054226996868e-8, -1.923745566122602e-9,\n 2.259550611212616e-6, 1.631749165091437e-7, -5.931857989915256e-9,\n -4.693392029005252e-9, 2.571854839274148e-10, 4.198786822861038e-12)\n\nq = (5.540896868127855e-5, 2.015419291097848e-1, -1.445310045430192e-5,\n -1.567047628411722e-2, 2.464756294660119e-4, -2.575458304732166e-7,\n 5.071449842454419e-3, -9.081985795339206e-5, -3.635420818812898e-6,\n 2.249490528450555e-8, -1.143810377431888e-3, 2.066112484281530e-5,\n 7.482907137737503e-7, 4.019321577844724e-8, -5.755568141370501e-10,\n 1.120748754429459e-4, -2.420274029674485e-6, -4.774829347564670e-8,\n -4.279037686797859e-9, -2.045829202713288e-10, 5.025109163112005e-12)\n\nr = (3.432285006604888e-3, 1.672940491817403e-1, 2.640304401023995e-5,\n 1.082267090441036e-1, -6.296778883666940e-5, -4.542775152303671e-7,\n -1.859711038699727e-1, 7.659006320303959e-4, -4.794661268817618e-7,\n 8.093368602891911e-9, 1.001140606840692e-1, -1.038712945546608e-3,\n -6.227915160991074e-6, 2.798564479737090e-8, -1.343623657549961e-10,\n 1.024345179842964e-2, 4.981135430579384e-4, 4.466087528793912e-6,\n 1.960872795577774e-8, -2.723159418888634e-10, 1.122200786423241e-12)\n\nu = (5.180529787390576e-3, 1.052097167201052e-3, 3.666193708310848e-5,\n 7.112223828976632, -3.631366777096209e-4, -7.336295318742821e-7,\n -1.576886793288888e+2, -1.840239113483083e-3, 8.624279120240952e-6,\n 1.233529799729501e-8, 1.826482800939545e+3, 1.633903983457674e-1,\n -9.201096427222349e-5, -9.187900959754842e-8, -1.442010369809705e-10,\n -8.542357182595853e+3, -1.408635241899082, 1.660164829963661e-4,\n 6.797409608973845e-7, 3.345074990451475e-10, 8.285687652694768e-13)\n\nk = 0.0162\n\na, b, c, d, e, P, q, r, u, k = map(np.asarray, (a, b, c, d, e, P, q, r, u, k))\n\n\n@match_args_return\ndef SP_from_C(C, t, p):\n r\"\"\"Calculates Practical Salinity, SP, from conductivity, C, primarily\n using the PSS-78 algorithm. Note that the PSS-78 algorithm for Practical\n Salinity is only valid in the range 2 < SP < 42. If the PSS-78 algorithm\n produces a Practical Salinity that is less than 2 then the Practical\n Salinity is recalculated with a modified form of the Hill et al. (1986)\n formula. The modification of the Hill et al. (1986) expression is to ensure\n that it is exactly consistent with PSS-78 at SP = 2. Note that the input\n values of conductivity need to be in units of mS/cm (not S/m).\n\n Parameters\n ----------\n C : array\n conductivity [mS cm :sup:`-1`]\n t : array\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. 
absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng., 11,\n 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-01. Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n C, t, p = np.broadcast_arrays(C, t, p)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n # The dimensionless conductivity ratio, R, is the conductivity input, C,\n # divided by the present estimate of C(SP=35, t_68=15, p=0) which is\n # 42.9140 mS/cm (=4.29140 S/m), (Culkin and Smith, 1980).\n\n R = 0.023302418791070513 * C # 0.023302418791070513 = 1./42.9140\n\n # rt_lc corresponds to rt as defined in the UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n Rp = (1 + (p * (e[0] + e[1] * p + e[2] * p ** 2)) /\n (1 + d[0] * t68 + d[1] * t68 ** 2 + (d[2] + d[3] * t68) * R))\n Rt = R / (Rp * rt_lc)\n\n Rt[Rt < 0] = np.nan\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # The following section of the code is designed for SP < 2 based on the\n # Hill et al. (1986) algorithm. This algorithm is adjusted so that it is\n # exactly equal to the PSS-78 algorithm at SP = 2.\n\n I2, = np.nonzero(np.ravel(SP) < 2)\n if len(I2) > 0:\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return SP\n\n\ndef C_from_SP(SP, t, p):\n r\"\"\"Calculates conductivity, C, from (SP, t, p) using PSS-78 in the range\n 2 < SP < 42. If the input Practical Salinity is less than 2 then a modified\n form of the Hill et al. (1986) fomula is used for Practical Salinity. The\n modification of the Hill et al. (1986) expression is to ensure that it is\n exactly consistent with PSS-78 at SP = 2.\n\n The conductivity ratio returned by this function is consistent with the\n input value of Practical Salinity, SP, to 2x10^-14 psu over the full range\n of input parameters (from pure fresh water up to SP = 42 psu). This error\n of 2x10^-14 psu is machine precision at typical seawater salinities. 
This\n accuracy is achieved by having four different polynomials for the starting\n value of Rtx (the square root of Rt) in four different ranges of SP, and by\n using one and a half iterations of a computationally efficient modified\n Newton-Raphson technique to find the root of the equation.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n t : array\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n C : array\n conductivity [mS cm :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n Note that strictly speaking PSS-78 (Unesco, 1983) defines Practical\n Salinity in terms of the conductivity ratio, R, without actually\n specifying the value of C(35,15,0) (which we currently take to be\n 42.9140 mS/cm).\n\n Examples\n --------\n TODO\n\n References\n ----------\n .. [1] Hill, K.D., T.M. Dauphinee and D.J. Woods, 1986: The extension of\n the Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n OE-11, 1, 109 - 112.\n\n .. [2] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See appendix E.\n\n .. [3] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n \"\"\"\n\n C = 42.9140 * R_from_SP(SP, t, p)\n\n return C\n\n\n@match_args_return\ndef SP_from_R(R, t, p):\n r\"\"\"Calculates Practical Salinity, SP, from the conductivity ratio, R,\n primarily using the PSS-78 algorithm. Note that the PSS-78 algorithm for\n Practical Salinity is only valid in the range 2 < SP < 42. If the PSS-78\n algorithm produces a Practical Salinity that is less than 2 then the\n Practical Salinity is recalculated with a modified form of the Hill et al.\n (1986) formula. The modification of the Hill et al. (1986) expression are\n to ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n Parameters\n ----------\n R : array_like\n conductivity ratio [unitless]\n t : array_like\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n 11, 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-01. 
Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n R, t, p = np.broadcast_arrays(R, t, p)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n # rt_lc corresponds to rt as defined in the UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n Rp = (1 + (p * (e[0] + e[1] * p + e[2] * p ** 2)) /\n (1 + d[0] * t68 + d[1] * t68 ** 2 + (d[2] + d[3] * t68) * R))\n Rt = R / (Rp * rt_lc)\n\n Rt[Rt < 0] = np.nan\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # The following section of the code is designed for SP < 2 based on the\n # Hill et al. (1986) algorithm. This algorithm is adjusted so that it is\n # exactly equal to the PSS-78 algorithm at SP = 2.\n\n I2 = SP < 2\n if I2.any():\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return SP\n\n\n@match_args_return\ndef R_from_SP(SP, t, p):\n r\"\"\"Calculates conductivity ratio from (SP, t, p) using PSS-78 in the range\n 2 < SP < 42. If the input Practical Salinity is less than 2 then a\n modified form of the Hill et al. (1986) formula is used for Practical\n Salinity. The modification of the Hill et al. (1986) expression is to\n ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n The conductivity ratio returned by this function is consistent with the\n input value of Practical Salinity, SP, to 2x10^-14 psu over the full range\n of input parameters (from pure fresh water up to SP = 42 psu). This error\n of 2x10^-14 psu is machine precision at typical seawater salinities. This\n accuracy is achieved by having four different polynomials for the starting\n value of Rtx (the square root of Rt) in four different ranges of SP, and by\n using one and a half iterations of a computationally efficient modified\n Newton-Raphson technique to find the root of the equation.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n t : array_like\n in-situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array\n sea pressure [dbar]\n (i.e. absolute pressure - 10.1325 dbar)\n\n Returns\n -------\n R : array_like\n conductivity ratio [unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n Strictly speaking PSS-78 (Unesco, 1983) defines Practical Salinity in terms\n of the conductivity ratio, R, without actually specifying the value of\n C(35, 15, 0) (which we currently take to be 42.9140 mS cm^-1.\n Culkin and Smith, 1980).\n\n References\n ----------\n .. [1] Culkin and Smith, 1980: Determination of the Concentration of\n Potassium Chloride Solution Having the Same Electrical Conductivity, at\n 15C and Infinite Frequency, as Standard Seawater of Salinity 35.0000\n (Chlorinity 19.37394), IEEE J. Oceanic Eng, 5, 22-23.\n\n .. [2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng.,\n 11, 109 - 112.\n\n .. 
[3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. Appendix E.\n\n .. [4] Unesco, 1983: Algorithms for computation of fundamental properties\n of seawater. Unesco Technical Papers in Marine Science, 44, 53 pp.\n\n Modifications:\n 2011-04-06. Paul Barker, Trevor McDougall and Rich Pawlowicz.\n \"\"\"\n\n # These few lines ensure that SP is non-negative.\n if (SP < 0).any():\n raise ValueError('R_from_SP: SP must be non-negative!')\n\n SP, t, p = np.broadcast_arrays(SP, t, p)\n\n # Setting up the constants\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n x = np.sqrt(SP)\n Rtx = np.zeros_like(SP) * np.nan\n\n # Finding the starting value of Rtx, the square root of Rt, using four\n # different polynomials of SP and t68.\n # TODO: Test case that cover all those \"ifs\"\n I = SP >= 9\n if I.any():\n Rtx[I] = P[0] + x[I] * (P[1] + P[4] * t68[I] + x[I] * (P[3] + P[7] *\n t68[I] + x[I] * (P[6] + P[11] * t68[I] + x[I] * (P[10] + P[16] *\n t68[I] + x[I] * P[15])))) + t68[I] * (P[2] + t68[I] * (P[5] + x[I] *\n x[I] * (P[12] + x[I] * P[17]) + P[8] * x[I] + t68[I] * (P[9] + x[I] *\n (P[13] + x[I] * P[18]) + t68[I] * (P[14] + P[19] * x[I] + P[20] *\n t68[I]))))\n\n I = np.logical_and(SP >= 0.25, SP < 9)\n if I.any():\n Rtx[I] = q[0] + x[I] * (q[1] + q[4] * t68[I] + x[I] * (q[3] + q[7] *\n t68[I] + x[I] * (q[6] + q[11] * t68[I] + x[I] * (q[10] + q[16] *\n t68[I] + x[I] * q[15])))) + t68[I] * (q[2] + t68[I] * (q[5] + x[I] *\n x[I] * (q[12] + x[I] * q[17]) + q[8] * x[I] + t68[I] * (q[9] + x[I] *\n (q[13] + x[I] * q[18]) + t68[I] * (q[14] + q[19] * x[I] + q[20] *\n t68[I]))))\n\n I = np.logical_and(SP >= 0.003, SP < 0.25)\n if I.any():\n Rtx[I] = r[0] + x[I] * (r[1] + r[4] * t68[I] + x[I] * (r[3] + r[7] *\n t68[I] + x[I] * (r[6] + r[11] * t68[I] + x[I] * (r[10] + r[16] *\n t68[I] + x[I] * r[15])))) + t68[I] * (r[2] + t68[I] * (r[5] + x[I] *\n x[I] * (r[12] + x[I] * r[17]) + r[8] * x[I] + t68[I] * (r[9] + x[I] *\n (r[13] + x[I] * r[18]) + t68[I] * (r[14] + r[19] * x[I] + r[20] *\n t68[I]))))\n\n I = SP < 0.003\n if I.any():\n Rtx[I] = u[0] + x[I] * (u[1] + u[4] * t68[I] + x[I] * (u[3] + u[7] *\n t68[I] + x[I] * (u[6] + u[11] * t68[I] + x[I] * (u[10] + u[16] *\n t68[I] + x[I] * u[15])))) + t68[I] * (u[2] + t68[I] * (u[5] + x[I] *\n x[I] * (u[12] + x[I] * u[17]) + u[8] * x[I] + t68[I] * (u[9] + x[I] *\n (u[13] + x[I] * u[18]) + t68[I] * (u[14] + u[19] * x[I] + u[20] *\n t68[I]))))\n\n # Finding the starting value of dSP_dRtx, the derivative of SP with\n # respect to Rtx.\n dSP_dRtx = a[1] + (2 * a[2] + (3 * a[3] + (4 * a[4] + 5 * a[5] * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[1] + (2 * b[2] + (3 * b[3] +\n (4 * b[4] + 5 * b[5] * Rtx) * Rtx) * Rtx) * Rtx)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n dSP_dRtx[I2] = (dSP_dRtx[I2] + a[0] * 800 * Rtx[I2] * (1.5 + 2 * x) /\n (part1 ** 2) + b[0] * ft68[I2] * (10 + sqrty * (20 +\n 30 * sqrty)) / (part2 ** 2))\n\n dSP_dRtx[I2] = Hill_ratio * dSP_dRtx[I2]\n\n \"\"\"One iteration through the modified Newton-Raphson method achieves an\n error in Practical Salinity of about 10^-12 for all combinations of the\n inputs. 
One and a half iterations of the modified Newton-Raphson method\n achieves a maximum error in terms of Practical Salinity of better than\n 2x10^-14 everywhere.\n\n We recommend one and a half iterations of the modified Newton-Raphson\n method.\"\"\"\n\n # Begin the modified Newton-Raphson method.\n SP_est = (a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx))\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n Rtx_old = Rtx\n Rtx = Rtx_old - (SP_est - SP) / dSP_dRtx\n\n # This mean value of Rtx, Rtxm, is the value of Rtx at which the\n # derivative dSP_dRtx is evaluated.\n Rtxm = 0.5 * (Rtx + Rtx_old)\n\n dSP_dRtx = a[1] + (2 * a[2] + (3 * a[3] + (4 * a[4] + 5 * a[5] *\n Rtxm) * Rtxm) * Rtxm) * Rtxm + ft68 * (b[1] + (2 * b[2] +\n (3 * b[3] + (4 * b[4] + 5 * b[5] * Rtxm) * Rtxm) * Rtxm) * Rtxm)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtxm[I2] ** 2)\n sqrty = 10 * Rtxm[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n dSP_dRtx[I2] = (dSP_dRtx[I2] + a[0] * 800 * Rtxm[I2] * (1.5 + 2 *\n x) / (part1 ** 2) + b[0] * ft68[I2] * (10 + sqrty *\n (20 + 30 * sqrty)) / (part2 ** 2))\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n dSP_dRtx[I2] = Hill_ratio * dSP_dRtx[I2]\n\n # End of the one full iteration of the modified Newton-Raphson technique.\n Rtx = Rtx_old - (SP_est - SP) / dSP_dRtx # Updated Rtx\n\n # Now we do another half iteration of the modified Newton-Raphson\n # technique, making a total of one and a half modified N-R iterations.\n\n SP_est = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n # TODO: Test case that cover all those \"ifs\"\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n Rtx = Rtx - (SP_est - SP) / dSP_dRtx\n\n \"\"\" TODO: add this as a kw.\n Return the error, SP_error, in Rtx (in terms of psu).\n\n SP_est = (a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) *\n Rtx) * Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] +\n (b[4] + b[5] * Rtx) * Rtx) * Rtx) * Rtx) * Rtx))\n I2 = SP_est < 2\n if I2.any():\n x = 400 * (Rtx[I2] ** 2)\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP_est[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n SP_est[I2] = Hill_ratio * SP_Hill_raw\n\n SP_error = np.abs(SP - SP_est)\n\n This is the end of the error testing\n \"\"\"\n\n # Now go from Rtx to Rt and then to the conductivity ratio R at pressure p.\n Rt = Rtx ** 2\n A = d[2] + d[3] * t68\n B = 1 + d[0] * t68 + d[1] * t68 ** 2\n C = p * (e[0] + e[1] * p + e[2] * p ** 2)\n # rt_lc (i.e. 
rt_lower_case) corresponds to rt as defined in the\n # UNESCO 44 (1983) routines.\n rt_lc = c[0] + (c[1] + (c[2] + (c[3] + c[4] * t68) * t68) * t68) * t68\n\n D = B - A * rt_lc * Rt\n E = rt_lc * Rt * A * (B + C)\n Ra = np.sqrt(D ** 2 + 4 * E) - D\n\n return 0.5 * Ra / A\n\n\n@match_args_return\ndef SP_salinometer(Rt, t):\n r\"\"\"Calculates Practical Salinity SP from a salinometer, primarily using\n the PSS-78 algorithm. Note that the PSS-78 algorithm for Practical\n Salinity is only valid in the range 2 < SP < 42. If the PSS-78 algorithm\n produces a Practical Salinity that is less than 2 then the Practical\n Salinity is recalculated with a modified form of the Hill et al. (1986)\n formula. The modification of the Hill et al. (1986) expression is to\n ensure that it is exactly consistent with PSS-78 at SP = 2.\n\n A laboratory salinometer has the ratio of conductivities, Rt, as an output,\n and the present function uses this conductivity ratio and the temperature t\n of the salinometer bath as the two input variables.\n\n Parameters\n ----------\n Rt : array\n C(SP,t_68,0)/C(SP=35,t_68,0) [unitless]\n conductivity ratio\n :math:`R = \\frac{C(S, t_68, 0)}{C(35, 15(IPTS-68),0)} [unitless]\n\n t : array\n Temperature of the bath of the salinometer [:math:`^\\circ` C (ITS-90)]\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n See Also\n --------\n TODO: sw.sals\n\n Notes\n -----\n TODO\n\n Examples\n --------\n TODO\n\n References\n -----------\n ..[1] Fofonoff, P. and R.C. Millard Jr. 1983: Algorithms for computation of\n fundamental properties of seawater. Unesco Tech. Pap. in Mar. Sci., 44,\n 53 pp.\n\n ..[2] Hill, K.D., T.M. Dauphinee & D.J. Woods, 1986: The extension of the\n Practical Salinity Scale 1978 to low salinities. IEEE J. Oceanic Eng., 11,\n 109 - 112.\n\n .. [3] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See appendix E of this TEOS-10 Manual, and in\n particular, Eqns. (E.2.1) and (E.2.6).\n\n Modifications:\n 2011-04-30. Paul Barker, Trevor McDougall and Rich Pawlowicz. Version 3.0\n \"\"\"\n\n Rt, t = np.broadcast_arrays(Rt, t)\n\n t68 = t * 1.00024\n ft68 = (t68 - 15) / (1 + k * (t68 - 15))\n\n Rt[Rt < 0] = np.NaN\n Rtx = np.sqrt(Rt)\n\n SP = a[0] + (a[1] + (a[2] + (a[3] + (a[4] + a[5] * Rtx) * Rtx) * Rtx) *\n Rtx) * Rtx + ft68 * (b[0] + (b[1] + (b[2] + (b[3] + (b[4] + b[5] *\n Rtx) * Rtx) * Rtx) * Rtx) * Rtx)\n\n \"\"\"The following section of the code is designed for SP < 2 based on the\n Hill et al. (1986) algorithm. 
This algorithm is adjusted so that it is\n exactly equal to the PSS-78 algorithm at SP = 2.\"\"\"\n\n I2 = SP < 2\n if I2.any():\n Hill_ratio = Hill_ratio_at_SP2(t[I2])\n x = 400 * Rt[I2]\n sqrty = 10 * Rtx[I2]\n part1 = 1 + x * (1.5 + x)\n part2 = 1 + sqrty * (1 + sqrty * (1 + sqrty))\n SP_Hill_raw = SP[I2] - a[0] / part1 - b[0] * ft68[I2] / part2\n SP[I2] = Hill_ratio * SP_Hill_raw\n # Ensure that SP is non-negative.\n SP = np.maximum(SP, 0)\n return SP\n\n\n@match_args_return\ndef SP_from_SK(SK):\n r\"\"\"Calculates Practical Salinity from Knudsen Salinity.\n\n Parameters\n ----------\n SK : array_like\n Knudsen Salinity [parts per thousand, ppt]\n\n Returns\n -------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Appendix A.3.\n\n Modifications:\n 2011-11-16. Trevor McDougall and Paul Barker.\n \"\"\"\n\n SP = (SK - 0.03) * (1.80655 / 1.805)\n return np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n\n@match_args_return\ndef SK_from_SP(SP):\n r\"\"\"Calculates Knudsen Salinity from Practical Salinity.\n\n Parameters\n ----------\n SP : array\n Practical Salinity [psu (PSS-78), unitless]\n\n Returns\n -------\n SK : array_like\n Knudsen Salinity [parts per thousand, ppt]\n\n Examples\n --------\n TODO\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Appendix A.3.\n\n Modifications:\n 2011-11-16. Trevor McDougall and Paul Barker.\n \"\"\"\n SP = np.maximum(SP, 0) # Ensure that SP is non-negative.\n\n return 0.03 + SP * (1.805 / 1.80655)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom library import gibbs\nfrom absolute_salinity_sstar_ct import CT_from_t\nfrom gsw.utilities import match_args_return, strip_mask\nfrom conversions import pt_from_CT, pt_from_t, pt0_from_t\nfrom constants import Kelvin, db2Pascal, P0, SSO, cp0, R, sfac, M_S\n\n__all__ = [\n 'rho_t_exact',\n 'pot_rho_t_exact',\n 'sigma0_pt0_exact',\n 'alpha_wrt_CT_t_exact',\n 'alpha_wrt_pt_t_exact',\n 'alpha_wrt_t_exact',\n 'beta_const_CT_t_exact',\n 'beta_const_pt_t_exact',\n 'beta_const_t_exact',\n 'specvol_t_exact',\n 'specvol_anom_t_exact',\n 'sound_speed_t_exact',\n 'kappa_t_exact',\n 'kappa_const_t_exact',\n 'internal_energy_t_exact',\n 'enthalpy_t_exact',\n 'dynamic_enthalpy_t_exact',\n 'SA_from_rho_t_exact',\n #'t_from_rho_exact',\n 't_maxdensity_exact',\n 'entropy_t_exact',\n 'cp_t_exact',\n 'isochoric_heat_cap_t_exact',\n 'chem_potential_relative_t_exact',\n 'chem_potential_water_t_exact',\n 'chem_potential_salt_t_exact',\n 'Helmholtz_energy_t_exact',\n 'adiabatic_lapse_rate_t_exact',\n 'osmotic_coefficient_t_exact',\n 'osmotic_pressure_t_exact'\n ]\n\nn0, n1, n2 = 0, 1, 2\n\n\n@match_args_return\ndef Helmholtz_energy_t_exact(SA, t, p):\n r\"\"\"Calculates the Helmholtz energy of seawater.\n\n The specific Helmholtz energy of seawater :math:`f` is given by:\n\n .. math::\n f(SA, t, p) = g - (p + P_0) \\nu =\n g - (p + P_0) \\frac{\\partial g}{\\partial P}\\Big|_{SA,T}\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n Helmholtz_energy : array_like\n Helmholtz energy [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.Helmholtz_energy_t_exact(SA, t, p)\n array([-5985.58288209, -5830.81845224, -3806.96617841, -877.66369421,\n -462.17033905, -245.50407205])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.13.\n\n Modifications:\n 2011-03-29. Trevor McDougall\n \"\"\"\n\n return (gibbs(n0, n0, n0, SA, t, p) -\n (db2Pascal * p + P0) * gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef rho_t_exact(SA, t, p):\n r\"\"\"Calculates in situ density of seawater from Absolute Salinity and in\n situ temperature.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n rho_t_exact : array_like\n in situ density [kg m :sup:`-3`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.rho(SA, t, p)\n array([ 1021.84017319, 1022.26268993, 1024.42771594, 1027.79020181,\n 1029.83771473, 1032.00240412])\n\n References\n ----------\n .. 
[1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.8.\n\n Modifications:\n 2011-03-29. Paul Barker, David Jackett and Trevor McDougal\n \"\"\"\n\n return 1. / gibbs(n0, n0, n1, SA, t, p)\n\n\n@match_args_return\ndef sigma0_pt0_exact(SA, pt0):\n r\"\"\"Calculates potential density anomaly with reference sea pressure of\n zero (0) dbar. The temperature input to this function is potential\n temperature referenced to zero dbar.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n pt0 : array_like\n potential temperature [:math:`^\\circ` C (ITS-90)]\n with respect to a reference sea pressure of 0 dbar\n\n Returns\n -------\n sigma0_pt0_exact : array_like\n potential density anomaly [kg m :sup:`-3`]\n respect to a reference pressure of 0 dbar\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.rho(SA, t, p)\n array([ 1021.84017319, 1022.26268993, 1024.42771594, 1027.79020181,\n 1029.83771473, 1032.00240412])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (3.6.1).\n\n Modifications:\n 2011-03-29. Trevor McDougal and Paul Barker.\n \"\"\"\n SA = np.maximum(SA, 0) # Ensure that SA is non-negative.\n\n x2 = sfac * SA\n x = np.sqrt(x2)\n y = pt0 * 0.025\n\n g03 = (100015.695367145 +\n y * (-270.983805184062 +\n y * (1455.0364540468 +\n y * (-672.50778314507 +\n y * (397.968445406972 +\n y * (-194.618310617595 +\n y * (63.5113936641785 -\n y * 9.63108119393062)))))))\n\n g08 = x2 * (-3310.49154044839 +\n x * (199.459603073901 +\n x * (-54.7919133532887 +\n x * 36.0284195611086 -\n y * 22.6683558512829) +\n y * (-175.292041186547 +\n y * (383.058066002476 +\n y * (-460.319931801257 +\n y * 234.565187611355)))) +\n y * (729.116529735046 +\n y * (-860.764303783977 +\n y * (694.244814133268 +\n y * (-297.728741987187)))))\n\n \"\"\"The above code is exactly the same as the following two lines of code.\n sigma0_pt_exact = rho_t_exact(SA, pt0, 0.) - 1000\n \"\"\"\n\n return 100000000. / (g03 + g08) - 1000.0\n\n\n@match_args_return\ndef enthalpy_t_exact(SA, t, p):\n r\"\"\"Calculates the specific enthalpy of seawater.\n\n The specific enthalpy of seawater :math:`h` is given by:\n\n .. 
math::\n h(SA, t, p) = g + (T_0 + t)\\eta =\n g - (T_0 + t) \\frac{\\partial g}{\\partial T}\\Big|_{SA,p}\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n enthalpy : array_like\n specific enthalpy [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.enthalpy(SA, t, p)\n array([ 115103.26047838, 114014.8036012 , 92179.9209311 ,\n 43255.32838089, 33087.21597002, 26970.5880448 ])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See appendix A.11.\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker.\n \"\"\"\n\n return (gibbs(n0, n0, n0, SA, t, p) -\n (t + Kelvin) * gibbs(n0, n1, n0, SA, t, p))\n\n\n@match_args_return\ndef specvol_t_exact(SA, t, p):\n r\"\"\"Calculates the specific volume of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n specvol : array_like\n specific volume [m :sup:`3` kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.specvol(SA, t, p)\n array([ 0.00097863, 0.00097822, 0.00097615, 0.00097296, 0.00097103,\n 0.00096899])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.7.\n\n Modifications:\n 2011-03-23. David Jackett and Paul Barker.\n \"\"\"\n\n return gibbs(n0, n0, n1, SA, t, p)\n\n\n@match_args_return\ndef entropy_t_exact(SA, t, p):\n r\"\"\"Calculates specific entropy of seawater.\n\n The specific entropy of seawater :math:`\\eta` is given by:\n\n .. 
math::\n \\eta(SA, t, p) = -g_T = \\frac{\\partial g}{\\partial T}\\Big|_{SA,p}\n\n When taking derivatives with respect to *in situ* temperature, the symbol\n :math:`T` will be used for temperature in order that these derivatives not\n be confused with time derivatives.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n entropy : array_like\n specific entropy [J kg :sup:`-1` K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.entropy_t_exact(SA, t, p)\n array([ 400.38942528, 395.43817843, 319.8664982 , 146.79088159,\n 98.64734087, 62.79150873])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker.\n \"\"\"\n\n return -gibbs(n0, n1, n0, SA, t, p)\n\n\n@match_args_return\ndef cp_t_exact(SA, t, p):\n r\"\"\"Calculates the isobaric heat capacity of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n cp_t_exact : array_like\n heat capacity of seawater [J kg :sup:`-1` K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.cp_t_exact(SA, t, p)\n array([ 4002.88800396, 4000.98028393, 3995.54646889, 3985.07676902,\n 3973.59384348, 3960.18408479])\n\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n return -(t + Kelvin) * gibbs(n0, n2, n0, SA, t, p)\n\n\n@match_args_return\ndef sound_speed_t_exact(SA, t, p):\n r\"\"\"Calculates the speed of sound in seawater.\n\n The speed of sound in seawater :math:`c` is given by:\n\n .. math::\n c(SA, t, p) = \\sqrt{ \\partial P / \\partial \\rho |_{SA,\\eta}} =\n \\sqrt{(\\rho\\kappa)^{-1}} =\n g_P \\sqrt{g_{TT}/(g^2_{TP} - g_{TT}g_{PP})}\n\n Note that in these expressions, since sound speed is in m s :sup`-1` and\n density has units of kg m :sup:`-3` it follows that the pressure of the\n partial derivatives must be in Pa and the isentropic compressibility\n :math:`kappa` must have units of Pa :sup:`-1`. 
The sound speed c produced\n by both the SIA and the GSW software libraries (appendices M and N) has\n units of m s :sup:`-1`.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n sound_speed : array_like\n speed of sound in seawater [m s :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.sound_speed_t_exact(SA, t, p)\n array([ 1542.61580359, 1542.70353407, 1530.84497914, 1494.40999692,\n 1487.37710252, 1483.93460908])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.17.1)\n\n Modifications:\n 2011-03-29. David Jackett, Paul Barker and Trevor McDougall.\n \"\"\"\n\n return (gibbs(n0, n0, n1, SA, t, p) * np.sqrt(gibbs(n0, n2, n0, SA, t, p) /\n (gibbs(n0, n1, n1, SA, t, p) ** 2 - gibbs(n0, n2, n0, SA, t, p) *\n gibbs(n0, n0, n2, SA, t, p))))\n\n\n@match_args_return\ndef specvol_anom_t_exact(SA, t, p):\n r\"\"\"Calculates specific volume anomaly from Absolute Salinity, in situ\n temperature and pressure, using the full TEOS-10 Gibbs function.\n\n The reference value of Absolute Salinity is SSO and the reference value of\n Conservative Temperature is equal to 0 :math:`^\\circ` C.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n specvol_anom_t_exact : array_like\n specific volume anomaly [m :sup:`3` kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.specvol_anom_t_exact(SA, t, p)\n array([ 6.01044463e-06, 5.78602432e-06, 4.05564999e-06,\n 1.42198662e-06, 1.04351837e-06, 7.63964850e-07])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (3.7.3)\n\n Modifications:\n 2011-03-23. 
Trevor McDougall and Paul Barker\n \"\"\"\n\n pt_zero = pt_from_CT(SSO, 0)\n t_zero = pt_from_t(SSO, pt_zero, 0, p)\n return (gibbs(n0, n0, n1, SA, t, p) -\n gibbs(n0, n0, n1, SSO, t_zero, p))\n\n\n@match_args_return\ndef chem_potential_relative_t_exact(SA, t, p):\n r\"\"\"Calculates the adiabatic lapse rate of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n chem_potential_relative : array_like\n relative chemical potential [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.chem_potential_relative_t_exact(SA, t, p)\n array([ 79.4254481 , 79.25989214, 74.69154859, 65.64063719,\n 61.22685656, 57.21298557])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-03-29. Trevor McDougall and Paul Barker\n \"\"\"\n\n return gibbs(n1, n0, n0, SA, t, p)\n\n\n@match_args_return\ndef internal_energy_t_exact(SA, t, p):\n r\"\"\"Calculates the Helmholtz energy of seawater.\n\n The specific internal energy of seawater :math:`u` is given by:\n\n .. math::\n u(SA, t, p) = g + (T_0 + t)\\eta - (p + P_0)\\nu =\n g - (T_0 + t)\\frac{\\partial g}{\\partial T}\\Big|_{SA,p} -\n (p + P_0)\\frac{\\partial g}{\\partial P}\\Big|_{SA,T}\n\n where :math:`T_0` is the Celsius zero point, 273.15 K and\n :math:`P_0` = 101 325 Pa is the standard atmosphere pressure.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n internal_energy (u) : array_like\n specific internal energy [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.internal_energy_t_exact(SA, t, p)\n array([ 114906.23847309, 113426.57417062, 90860.81858842,\n 40724.34005719, 27162.66600185, 17182.50522667])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.11.1)\n\n Modifications:\n 2011-03-29. Trevor McDougall\n \"\"\"\n\n return (gibbs(n0, n0, n0, SA, t, p) -\n (Kelvin + t) * gibbs(n0, n1, n0, SA, t, p) -\n (db2Pascal * p + P0) * gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef kappa_const_t_exact(SA, t, p):\n r\"\"\"Calculates isothermal compressibility of seawater at constant in situ\n temperature.\n\n .. 
math::\n \\kappa^t(SA, t, p) =\n \\rho^{-1}\\frac{\\partial \\rho}{\\partial P}\\Big|_{SA,T} =\n -\\nu^{-1}\\frac{\\partial \\nu}{\\partial P}\\Big|_{SA,T} =\n -\\frac{g_{PP}}{g_P}\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n kappa : array_like\n Isothermal compressibility [Pa :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n This is the compressibility of seawater at constant in situ temperature.\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.kappa_const_t_exact(SA, t, p)\n array([ 4.19071646e-10, 4.18743202e-10, 4.22265764e-10,\n 4.37735100e-10, 4.40373818e-10, 4.41156577e-10])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.15.1)\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n return -gibbs(n0, n0, n2, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)\n\n\n@match_args_return\ndef alpha_wrt_t_exact(SA, t, p):\n r\"\"\"Calculates the thermal expansion coefficient of seawater with respect\n to in situ temperature.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n alpha_wrt_t : array_like\n thermal expansion coefficient [K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.alpha_wrt_t_exact(SA, t, p)\n array([ 0.0003256 , 0.00032345, 0.00028141, 0.00017283, 0.00014557,\n 0.00012836])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.18.1)\n\n .. [2] McDougall, T.J., D.R. Jackett and F.J. Millero, 2010: An algorithm\n for estimating Absolute Salinity in the global ocean. Submitted to Ocean\n Science. A preliminary version is available at Ocean Sci. Discuss.,\n 6, 215-242.\n\n Modifications:\n 2011-03-29. 
David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n return gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)\n\n\n@match_args_return\ndef isochoric_heat_cap_t_exact(SA, t, p):\n r\"\"\"Calculates the isochoric heat capacity of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n isochoric_heat_cap : array_like\n isochoric heat capacity [J kg :sup:`-1` K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.isochoric_heat_cap_t_exact(SA, t, p)\n array([ 3928.13708702, 3927.27381633, 3941.36418525, 3966.26126146,\n 3960.50903222, 3950.13901342])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.21.\n\n Modifications:\n 2011-03-29. Trevor McDougall\n \"\"\"\n\n return (-(Kelvin + t) * (gibbs(n0, n2, n0, SA, t, p) -\n gibbs(n0, n1, n1, SA, t, p) ** 2 / gibbs(n0, n0, n2, SA, t, p)))\n\n\n@match_args_return\ndef kappa_t_exact(SA, t, p):\n r\"\"\"Calculates the isentropic compressibility of seawater.\n\n When the entropy and Absolute Salinity are held constant while the pressure\n is changed, the isentropic and isohaline compressibility\n :math:`kappa` is obtained:\n\n .. math::\n \\kappa(SA, t, p) =\n \\rho^{-1}\\frac{\\partial \\rho}{\\partial P}\\Big|_{SA,\\eta} =\n -\\nu^{-1}\\frac{\\partial \\nu}{\\partial P}\\Big|_{SA,\\eta} =\n \\rho^{-1}\\frac{\\partial \\rho}{\\partial P}\\Big|_{SA,\\theta} =\n -\\nu^{-1}\\frac{\\partial \\nu}{\\partial P}\\Big|_{SA,\\theta} =\n -\\frac{ (g_{TP}^2 - g_{TT} g_{PP} ) }{g_P g_{TT}}\n\n The isentropic and isohaline compressibility is sometimes called simply the\n isentropic compressibility (or sometimes the \"adiabatic compressibility\"),\n on the unstated understanding that there is also no transfer of salt during\n the isentropic or adiabatic change in pressure.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n kappa : array_like\n Isentropic compressibility [Pa :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n The output is Pascal and not dbar.\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.kappa_t_exact(SA, t, p)\n array([ 4.11245799e-10, 4.11029072e-10, 4.16539558e-10,\n 4.35668338e-10, 4.38923693e-10, 4.40037576e-10])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqns. (2.16.1) and the row for kappa in\n Table P.1 of appendix P\n\n Modifications:\n 2011-03-23. 
David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n return ((gibbs(n0, n1, n1, SA, t, p) ** 2 - gibbs(n0, n2, n0, SA, t, p) *\n gibbs(n0, n0, n2, SA, t, p)) / (gibbs(n0, n0, n1, SA, t, p) *\n gibbs(n0, n2, n0, SA, t, p)))\n\n\n@match_args_return\ndef SA_from_rho_t_exact(rho, t, p):\n r\"\"\"Calculates the Absolute Salinity of a seawater sample, for given values\n of its density, in situ temperature and sea pressure (in dbar).\n\n One use for this function is in the laboratory where a measured value of\n the in situ density :math:`\\rho` of a seawater sample may have been made at\n the laboratory temperature :math:`t` and at atmospheric pressure :math:`p`.\n The present function will return the Absolute Salinity SA of this seawater\n sample.\n\n Parameters\n ----------\n rho : array_like\n in situ density [kg m :sup:`-3`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n This is expressed on the Reference-Composition Salinity Scale of\n Millero et al. (2008).\n\n After two iterations of a modified Newton-Raphson iteration,\n the error in SA is typically no larger than\n 2 :math:`^\\times` 10 :sup:`-13` [g kg :sup:`-1`]\n\n Examples\n --------\n >>> import gsw\n >>> rho = [1021.839, 1022.262, 1024.426, 1027.792, 1029.839, 1032.002]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.SA_from_rho_t_exact(rho, t, p)\n array([ 34.71022966, 34.89057683, 35.02332421, 34.84952096,\n 34.73824809, 34.73188384])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.5.\n\n .. [2] Millero, F. J., R. Feistel, D. G. Wright, and T. J. McDougall, 2008:\n The composition of Standard Seawater and the definition of the\n Reference-Composition Salinity Scale, Deep-Sea Res. I, 55, 50-72.\n\n Modifications:\n 2011-03-28. Trevor McDougall and Paul Barker.\n \"\"\"\n\n v_lab = np.ones_like(rho) / rho\n v_0 = gibbs(n0, n0, n1, 0, t, p)\n v_120 = gibbs(n0, n0, n1, 120, t, p)\n\n # Initial estimate of SA.\n SA = 120 * (v_lab - v_0) / (v_120 - v_0)\n Ior = np.logical_or(SA < 0, SA > 120)\n\n # Initial estimate of v_SA, SA derivative of v\n v_SA = (v_120 - v_0) / 120\n\n for k in range(0, 2):\n SA_old = SA\n delta_v = gibbs(n0, n0, n1, SA_old, t, p) - v_lab\n # Half way the mod. 
N-R method (McDougall and Wotherspoon, 2012)\n SA = SA_old - delta_v / v_SA\n SA_mean = 0.5 * (SA + SA_old)\n v_SA = gibbs(n1, n0, n1, SA_mean, t, p)\n SA = SA_old - delta_v / v_SA\n\n SA[Ior] = np.ma.masked\n\n return SA\n\n\n@match_args_return\ndef t_from_rho_exact(rho, SA, p):\n r\"\"\"Calculates the in-situ temperature of a seawater sample, for given\n values of its density, Absolute Salinity and sea pressure (in dbar).\n\n\n Parameters\n ----------\n rho : array_like\n in situ density [kg m :sup:`-3`]\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n t_multiple : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n At low salinities, in brackish water, there are two possible temperatures\n for a single density. This program will output both valid solutions\n (t, t_multiple), if there is only one possible solution the second variable\n will be set to NaN.\n\n\n Examples\n --------\n TODO\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-04-21. Trevor McDougall and Paul Barker.\n \"\"\"\n\n \"\"\"alpha_limit is the positive value of the thermal expansion coefficient\n which is used at the freezing temperature to distinguish between I_salty\n and I_fresh.\"\"\"\n alpha_limit = 1e-5\n\n \"\"\"rec_half_rho_TT is a constant representing the reciprocal of half the\n second derivative of density with respect to temperature near the\n temperature of maximum density.\"\"\"\n rec_half_rho_TT = -110.0\n\n t = np.zeros_like(SA) + np.NaN\n t_multiple = np.zeros_like(SA) + np.NaN\n\n I_SA = np.logical_or(SA < 0, SA > 42)\n I_p = np.logical_or(p < -1.5, p > 12000)\n I_SA_p = np.logical_or(I_SA, I_p)\n\n SA[I_SA_p] = np.ma.masked\n\n rho_40 = rho_t_exact(SA, 40 * np.ones_like(SA), p)\n\n I_rho_light = (rho - rho_40) < 0\n\n SA[I_rho_light] = np.ma.masked\n\n t_max_rho = t_maxdensity_exact(SA, p)\n rho_max = rho_t_exact(SA, t_max_rho, p)\n rho_extreme = rho_max\n t_freezing = t_freezing(SA, p) # Assumes seawater is saturated with air.\n rho_freezing = rho_t_exact(SA, t_freezing, p)\n\n I_fr_gr_max = (t_freezing - t_max_rho) > 0\n rho_extreme[I_fr_gr_max] = rho_freezing[I_fr_gr_max]\n\n I_rho_dense = rho > rho_extreme\n SA[I_rho_dense] = np.ma.masked\n\n # FIXME: Is this needed?\n I_bad = np.isnan(SA * p * rho)\n SA[I_bad] = np.ma.masked\n\n alpha_freezing = alpha_wrt_t_exact(SA, t_freezing, p)\n\n I_salty = alpha_freezing > alpha_limit\n\n t_diff = 40. 
* np.ones_like(I_salty) - t_freezing(I_salty)\n\n top = (rho_40[I_salty] - rho_freezing[I_salty] +\n rho_freezing[I_salty] * alpha_freezing[I_salty] * t_diff)\n\n a = top / (t_diff ** 2)\n b = -rho_freezing[I_salty] * alpha_freezing[I_salty]\n c = rho_freezing[I_salty] - rho[I_salty]\n sqrt_disc = np.sqrt(b ** 2 - 4 * a * c)\n # The value of t[I_salty] is the initial guess `t` in the range of I_salty.\n t[I_salty] = t_freezing[I_salty] + 0.5 * (-b - sqrt_disc) / a\n\n I_fresh = alpha_freezing <= alpha_limit\n t_diff = 40 * np.ones_like[I_fresh] - t_max_rho[I_fresh]\n factor = ((rho_max[I_fresh] - rho[I_fresh]) /\n (rho_max[I_fresh] - rho_40[I_fresh]))\n delta_t = t_diff * np.sqrt(factor)\n\n I_fresh_NR = delta_t > 5\n t[I_fresh[I_fresh_NR]] = (t_max_rho[I_fresh[I_fresh_NR]] +\n delta_t[I_fresh_NR])\n\n I_quad = delta_t <= 5\n t_a = np.zeros_like(SA) + np.NaN\n # Set the initial value of the quadratic solution roots.\n t_a[I_fresh[I_quad]] = (t_max_rho[I_fresh[I_quad]] +\n np.sqrt(rec_half_rho_TT * (rho[I_fresh[I_quad]] -\n rho_max[I_fresh[I_quad]])))\n\n for Number_of_iterations in range(0, 5):\n t_old = t_a\n rho_old = rho_t_exact(SA, t_old, p)\n factorqa = (rho_max - rho) / (rho_max - rho_old)\n t_a = t_max_rho + (t_old - t_max_rho) * np.sqrt(factorqa)\n\n t_a[t_freezing - t_a < 0] = np.ma.masked\n\n t_b = np.zeros_like(SA) + np.NaN\n # Set the initial value of the quadratic solution routes.\n t_b[I_fresh[I_quad]] = (t_max_rho[I_fresh[I_quad]] -\n np.sqrt(rec_half_rho_TT * (rho[I_fresh[I_quad]] -\n rho_max[I_fresh[I_quad]])))\n for Number_of_iterations in range(0, 6):\n t_old = t_b\n rho_old = rho_t_exact(SA, t_old, p)\n factorqb = (rho_max - rho) / (rho_max - rho_old)\n t_b = t_max_rho + (t_old - t_max_rho) * np.sqrt(factorqb)\n\n # After seven iterations of this quadratic iterative procedure,\n # the error in rho is no larger than 4.6x10^-13 kg/m^3.\n t_b[t_freezing - t_b < 0] = np.ma.masked\n\n # Begin the modified Newton-Raphson iterative method, which will only\n # operate on non-masked data.\n\n v_lab = np.ones_like(rho) / rho\n v_t = gibbs(0, 1, 1, SA, t, p)\n for Number_of_iterations in range(0, 3):\n t_old = t\n delta_v = gibbs(0, 0, 1, SA, t_old, p) - v_lab\n t = t_old - delta_v / v_t # Half way through the modified N-R method.\n t_mean = 0.5 * (t + t_old)\n v_t = gibbs(0, 1, 1, SA, t_mean, p)\n t = t_old - delta_v / v_t\n\n I_quad = ~np.isnan(t_a)\n t[I_quad] = t_a[I_quad]\n\n I_quad = ~np.isnan(t_b)\n t_multiple[I_quad] = t_b[I_quad]\n\n # After three iterations of this modified Newton-Raphson iteration,\n # the error in rho is no larger than 4.6x10^-13 kg/m^3.\n\n return t, t_multiple\n\n\n@match_args_return\ndef pot_rho_t_exact(SA, t, p, p_ref=0):\n r\"\"\"Calculates potential density of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n p_ref : int, float, optional\n reference pressure, default = 0\n\n Returns\n -------\n pot_rho : array_like\n potential density [kg m :sup:`-3`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.pot_rho_t_exact(SA, t, p)\n array([ 1021.79814581, 1022.05248442, 1023.89358365, 1026.66762112,\n 1027.10723087, 1027.40963126])\n >>> gsw.pot_rho(SA, t, p, p_ref=1000)\n array([ 
1025.95554512, 1026.21306986, 1028.12563226, 1031.1204547 ,\n 1031.63768355, 1032.00240412])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 3.4.\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n pt = pt_from_t(SA, t, p, p_ref=p_ref)\n\n return rho_t_exact(SA, pt, p_ref)\n\n\n@match_args_return\ndef alpha_wrt_CT_t_exact(SA, t, p):\n r\"\"\"Calculates the thermal expansion coefficient of seawater with respect\n to Conservative Temperature.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n alpha_wrt_CT : array_like\n thermal expansion coefficient [K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.alpha_wrt_CT_t_exact(SA, t, p)\n array([ 0.00032471, 0.00032272, 0.00028118, 0.00017314, 0.00014627,\n 0.00012943])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.18.3).\n\n Modifications:\n 2011-03-29. Trevor McDougall and Paul Barker\n \"\"\"\n\n pt0 = pt0_from_t(SA, t, p)\n factor = -cp0 / ((Kelvin + pt0) * gibbs(n0, n2, n0, SA, t, p))\n return factor * (gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef alpha_wrt_pt_t_exact(SA, t, p):\n r\"\"\"Calculates the thermal expansion coefficient of seawater with respect\n to potential temperature, with a reference pressure of zero.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n alpha_wrt_pt : array_like\n thermal expansion coefficient [K :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.alpha_wrt_pt_t_exact(SA, t, p)\n array([ 0.00032562, 0.00032355, 0.00028164, 0.00017314, 0.00014623,\n 0.00012936])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.18.2).\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n pt0 = pt0_from_t(SA, t, p)\n factor = gibbs(n0, n2, n0, SA, pt0, 0) / gibbs(n0, n2, n0, SA, t, p)\n return factor * (gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef beta_const_CT_t_exact(SA, t, p):\n r\"\"\"Calculates the saline (i.e. 
haline) contraction coefficient of seawater\n at constant Conservative Temperature.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n beta_const_CT : array_like\n saline contraction coefficient [kg g :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.beta_const_CT_t_exact(SA, t, p)\n array([ 0.00071749, 0.00071765, 0.00072622, 0.00075051, 0.00075506,\n 0.00075707])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.19.3)\n\n Modifications:\n 2010-07-23. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n # TODO: Original GSW-V3 re-implements gibbs, check what to do here!\n\n pt0 = pt0_from_t(SA, t, p)\n\n factora = (gibbs(n1, n1, n0, SA, t, p) - gibbs(n1, n0, n0, SA, pt0, 0) /\n (Kelvin + pt0))\n factor = (factora / (gibbs(n0, n0, n1, SA, t, p) *\n gibbs(n0, n2, n0, SA, t, p)))\n\n return (gibbs(n0, n1, n1, SA, t, p) * factor -\n gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef beta_const_pt_t_exact(SA, t, p):\n r\"\"\"Calculates the saline (i.e. haline) contraction coefficient of seawater\n at constant potential temperature with a reference pressure of 0 dbar.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n beta_const_pt : array_like\n saline contraction coefficient [kg g :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.beta_const_pt_t_exact(SA, t, p)\n array([ 0.00073112, 0.00073106, 0.00073599, 0.00075375, 0.00075712,\n 0.00075843])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.19.2)\n\n Modifications:\n 2011-04-10. Trevor McDougall and Paul Barker\n \"\"\"\n # NOTE: The original Matlab toolbox re-implement some code here. Why?\n\n pt0 = pt0_from_t(SA, t, p)\n\n factora = gibbs(n1, n1, n0, SA, t, p) - gibbs(n1, n1, n0, SA, pt0, 0)\n\n factor = (factora / (gibbs(n0, n0, n1, SA, t, p) *\n gibbs(n0, n2, n0, SA, t, p)))\n\n return (gibbs(n0, n1, n1, SA, t, p) * factor -\n gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))\n\n\n@match_args_return\ndef beta_const_t_exact(SA, t, p):\n r\"\"\"Calculates the saline (i.e. 
haline) contraction coefficient of seawater\n at constant in situ temperature.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n beta_const_t : array_like\n saline contraction coefficient [kg g :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.beta_const_t_exact(SA, t, p)\n array([ 0.00073112, 0.00073107, 0.00073602, 0.00075381, 0.00075726,\n 0.00075865])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.19.1)\n\n Modifications:\n 2011-03-29. David Jackett, Trevor McDougall and Paul Barker\n \"\"\"\n\n return -gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)\n\n\n@match_args_return\ndef chem_potential_water_t_exact(SA, t, p):\n r\"\"\"Calculates the chemical potential of water in seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n chem_potential_water : array_like\n chemical potential of water in seawater\n [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.chem_potential_water_t_exact(SA, t, p)\n array([-8545.56114628, -8008.08554834, -5103.98013987, -634.06778275,\n 3335.56680347, 7555.43444597])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-03-29. 
Trevor McDougall and Paul Barker\n \"\"\"\n SA, t, p, mask = strip_mask(SA, t, p)\n\n # FIXME: Ugly copy from gibbs, why?\n x2 = sfac * SA\n\n x = np.sqrt(x2)\n y = t * 0.025\n z = p * 1e-4 # Pressure (p) is sea pressure in units of dbar.\n\n g03_g = (101.342743139674 + z * (100015.695367145 +\n z * (-2544.5765420363 + z * (284.517778446287 +\n z * (-33.3146754253611 + (4.20263108803084 -\n 0.546428511471039 * z) * z)))) +\n y * (5.90578347909402 + z * (-270.983805184062 +\n z * (776.153611613101 + z * (-196.51255088122 +\n (28.9796526294175 - 2.13290083518327 * z) * z))) +\n y * (-12357.785933039 + z * (1455.0364540468 +\n z * (-756.558385769359 + z * (273.479662323528 +\n z * (-55.5604063817218 + 4.34420671917197 * z)))) +\n y * (736.741204151612 + z * (-672.50778314507 +\n z * (499.360390819152 + z * (-239.545330654412 +\n (48.8012518593872 - 1.66307106208905 * z) * z))) +\n y * (-148.185936433658 + z * (397.968445406972 +\n z * (-301.815380621876 + (152.196371733841 -\n 26.3748377232802 * z) * z)) +\n y * (58.0259125842571 + z * (-194.618310617595 +\n z * (120.520654902025 + z * (-55.2723052340152 +\n 6.48190668077221 * z))) +\n y * (-18.9843846514172 + y * (3.05081646487967 -\n 9.63108119393062 * z) +\n z * (63.5113936641785 + z * (-22.2897317140459 +\n 8.17060541818112 * z)))))))))\n\n g08_g = x2 * (1416.27648484197 +\n x * (-2432.14662381794 + x * (2025.80115603697 +\n y * (543.835333000098 + y * (-68.5572509204491 +\n y * (49.3667694856254 + y * (-17.1397577419788 +\n 2.49697009569508 * y))) - 22.6683558512829 * z) +\n x * (-1091.66841042967 - 196.028306689776 * y +\n x * (374.60123787784 - 48.5891069025409 * x +\n 36.7571622995805 * y) + 36.0284195611086 * z) +\n z * (-54.7919133532887 + (-4.08193978912261 -\n 30.1755111971161 * z) * z)) +\n z * (199.459603073901 + z * (-52.2940909281335 +\n (68.0444942726459 - 3.41251932441282 * z) * z)) +\n y * (-493.407510141682 + z * (-175.292041186547 +\n (83.1923927801819 - 29.483064349429 * z) * z) +\n y * (-43.0664675978042 + z * (383.058066002476 +\n z * (-54.1917262517112 + 25.6398487389914 * z)) +\n y * (-10.0227370861875 - 460.319931801257 * z + y *\n (0.875600661808945 + 234.565187611355 * z))))) +\n y * (168.072408311545))\n\n g_SA_part = (8645.36753595126 +\n x * (-7296.43987145382 + x * (8103.20462414788 +\n y * (2175.341332000392 + y * (-274.2290036817964 +\n y * (197.4670779425016 + y * (-68.5590309679152 +\n 9.98788038278032 * y))) - 90.6734234051316 * z) +\n x * (-5458.34205214835 - 980.14153344888 * y +\n x * (2247.60742726704 - 340.1237483177863 * x +\n 220.542973797483 * y) + 180.142097805543 * z) +\n z * (-219.1676534131548 + (-16.32775915649044 -\n 120.7020447884644 * z) * z)) +\n z * (598.378809221703 + z * (-156.8822727844005 +\n (204.1334828179377 - 10.23755797323846 * z) * z)) +\n y * (-1480.222530425046 + z * (-525.876123559641 +\n (249.57717834054571 - 88.449193048287 * z) * z) +\n y * (-129.1994027934126 + z * (1149.174198007428 +\n z * (-162.5751787551336 + 76.9195462169742 * z)) +\n y * (-30.0682112585625 - 1380.9597954037708 * z + y *\n (2.626801985426835 + 703.695562834065 * z))))) +\n y * (1187.3715515697959))\n\n chem_potential_water = g03_g + g08_g - 0.5 * sfac * SA * g_SA_part\n\n return np.ma.array(chem_potential_water, mask=mask, copy=False)\n\n\n@match_args_return\ndef chem_potential_salt_t_exact(SA, t, p):\n r\"\"\"Calculates the chemical potential of salt in seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature 
[:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n chem_potential_salt : array_like\n chemical potential of salt in seawater [J kg :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.chem_potential_salt_t_exact(SA, t, p)\n array([-8466.13569818, -7928.8256562 , -5029.28859129, -568.42714556,\n 3396.79366004, 7612.64743154])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 2.9.\n\n Modifications:\n 2010-03-29. Trevor McDougall and Paul Barker\n \"\"\"\n\n return (chem_potential_relative_t_exact(SA, t, p) +\n chem_potential_water_t_exact(SA, t, p))\n\n\n@match_args_return\ndef adiabatic_lapse_rate_t_exact(SA, t, p):\n r\"\"\"Calculates the adiabatic lapse rate of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n adiabatic_lapse_rate : array_like\n Adiabatic lapse rate [K Pa :sup:`-1`]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n The output is in unit of degrees Celsius per Pa, (or equivalently K/Pa) not\n in units of K/dbar\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.adiabatic_lapse_rate_t_exact(SA, t, p)\n array([ 2.40350282e-08, 2.38496700e-08, 2.03479880e-08,\n 1.19586543e-08, 9.96170718e-09, 8.71747270e-09])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See Eqn. (2.22.1).\n\n Modifications:\n 2011-03-29. Trevor McDougall and Paul Barker\n \"\"\"\n\n return -gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n2, n0, SA, t, p)\n\n\n@match_args_return\ndef osmotic_coefficient_t_exact(SA, t, p):\n r\"\"\"Calculates the osmotic coefficient of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n osmotic_coefficient : array_like\n osmotic coefficient of seawater [unitless]\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n >>> import gsw\n >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]\n >>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]\n >>> p = [10, 50, 125, 250, 600, 1000]\n >>> gsw.osmotic_coefficient_t_exact(SA,t , p)\n array([ 0.90284718, 0.90298624, 0.90238866, 0.89880927, 0.89801054,\n 0.89767912])\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n Modifications:\n 2011-04-01. 
Trevor McDougall and Paul Barker.\n 2012-11-15. Trevor McDougall and Paul Barker.\n \"\"\"\n\n SA = np.maximum(SA, 0)\n k = M_S / R\n part = k * (1000 - SA) / (Kelvin + t)\n\n x2 = sfac * SA\n x = np.sqrt(x2)\n y = t * 0.025\n # Note that the input pressure (p) is sea pressure in units of dbar.\n z = p / db2Pascal\n\n oc = (7.231916621570606e1, 1.059039593127674e1, -3.025914794694813e1,\n 5.040733670521486e1, -4.074543321119333e1, 1.864215613820487e1,\n -3.022566485046178, -6.138647522851840, 1.353207379758663e1,\n -7.316560781114737, 1.829232499785750, -5.358042980767074e-1,\n -1.705887283375562, -1.246962174707332e-1, 1.228376913546017,\n 1.089364009088042e-2, -4.264828939262248e-1, 6.213127679460041e-2,\n 2.481543497315280, -1.363368964861909, -5.640491627443773e-1,\n 1.344724779893754, -2.180866793244492, 4.765753255963401,\n -5.726993916772165, 2.918303792060746, -6.506082399183509e-1,\n -1.015695507663942e-1, 1.035024326471108, -6.742173543702397e-1,\n 8.465642650849419e-1, -7.508472135244717e-1, -3.668086444057845e-1,\n 3.189939162107803e-1, -4.245629194309487e-2)\n\n tl = (oc[0] + oc[1] * y + x * (oc[2] + x * (oc[3] + x * (oc[4] + x *\n (oc[5] + oc[6] * x))) + y * (oc[7] + x * (oc[8] + x *\n (oc[9] + oc[10] * x)) + y * (oc[11] + oc[12] * x + y * (oc[13] +\n oc[14] * x + y * (oc[15] + x * (oc[16] + oc[17] * y))))) + z *\n (oc[18] + x * (oc[19] + oc[20] * y + oc[21] * x) + y * (oc[22] + y *\n (oc[23] + y * (oc[24] + oc[25] * y))) + z * (oc[26] + oc[27] * x + y *\n (oc[28] + oc[29] * y) + z * (oc[30] + oc[31] * x + y * (oc[32] +\n oc[33] * y) + oc[34] * z)))))\n\n return tl * part\n\n\n@match_args_return\ndef dynamic_enthalpy_t_exact(SA, t, p):\n r\"\"\"Calculates the dynamic enthalpy of seawater from Absolute Salinity, in\n situ temperature and pressure. Dynamic enthalpy was defined by Young\n (2010) as the difference between enthalpy and potential enthalpy. Note that\n this function uses the full TEOS-10 Gibbs function (i.e. the sum of the\n IAPWS-09 and IAPWS-08 Gibbs functions, see the TEOS-10 Manual, IOC et al.\n (2010)).\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n dynamic_enthalpy_t_exact : array_like\n dynamic enthalpy [J :sup:`-1`]\n\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp.\n\n .. [2] Young, W.R., 2010: Dynamic enthalpy, Conservative Temperature, and\n the seawater. Boussinesq approximation. Journal of Physical Oceanography,\n 40, 394-400.\n\n Modifications:\n 2011-04-11. 
Trevor McDougall and Paul Barker\n \"\"\"\n\n CT = CT_from_t(SA, t, p)\n\n return enthalpy_t_exact(SA, t, p) - cp0 * CT\n\n\n@match_args_return\ndef t_maxdensity_exact(SA, p):\n r\"\"\"Calculates the in-situ temperature of maximum density of seawater.\n This function returns the in-situ temperature at which the density of\n seawater is a maximum, at given Absolute Salinity, SA, and sea pressure, p\n (in dbar).\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n p : array_like\n pressure [dbar]\n\n Returns\n -------\n t_maxdensity_exact : array_like\n max in-situ temperature [:math:`^\\circ` C]\n\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 3.42.\n\n Modifications:\n 2011-04-03. Trevor McDougall and Paul Barker\n \"\"\"\n\n # The temperature increment for calculating the gibbs_PTT derivative.\n dt = 0.001\n t = 3.978 - 0.22072 * SA # The initial guess of t_maxden.\n gibbs_PTT = 1.1e-8 # The initial guess for g_PTT.\n\n for Number_of_iterations in range(0, 3):\n t_old = t\n gibbs_PT = gibbs(n0, n1, n1, SA, t_old, p)\n # Half way through the mod. method (McDougall and Wotherspoon, 2012)\n t = t_old - gibbs_PT / gibbs_PTT\n t_mean = 0.5 * (t + t_old)\n gibbs_PTT = (gibbs(n0, n1, n1, SA, t_mean + dt, p) -\n gibbs(n0, n1, n1, SA, t_mean - dt, p)) / (dt + dt)\n t = t_old - gibbs_PT / gibbs_PTT\n\n # After three iterations of this modified Newton-Raphson iteration, the\n # error in t_maxdensity_exact is typically no larger than 1x10^-15 deg C.\n\n return t\n\n\n@match_args_return\ndef osmotic_pressure_t_exact(SA, t, pw):\n r\"\"\"Calculates the osmotic pressure of seawater.\n\n Parameters\n ----------\n SA : array_like\n Absolute salinity [g kg :sup:`-1`]\n t : array_like\n in situ temperature [:math:`^\\circ` C (ITS-90)]\n pw : array_like\n sea pressure of the pure water side [dbar]\n\n Returns\n -------\n osmotic_pressure_t_exact : array_like\n dynamic osmotic pressure of seawater [dbar]\n\n\n See Also\n --------\n TODO\n\n Notes\n -----\n TODO\n\n Examples\n --------\n\n References\n ----------\n .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation\n of seawater - 2010: Calculation and use of thermodynamic properties.\n Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,\n UNESCO (English), 196 pp. See section 3.41.\n\n Modifications:\n 2011-05-26. 
Trevor McDougall and Paul Barker\n \"\"\"\n SA = np.maximum(SA, 0)\n gibbs_pure_water = gibbs(0, 0, 0, 0, t, pw)\n\n # Initial guess of p, in dbar.\n p = pw + 235.4684\n\n # Initial guess of df/dp.\n df_dp = -db2Pascal * (gibbs(n0, n0, n1, SA, t, p) -\n SA * gibbs(n1, n0, n1, SA, t, p))\n\n for Number_of_iterations in range(0, 2):\n p_old = p\n f = gibbs_pure_water - chem_potential_water_t_exact(SA, t, p_old)\n # This is half way through the modified N-R method.\n p = p_old - f / df_dp\n p_mean = 0.5 * (p + p_old)\n df_dp = -db2Pascal * (gibbs(0, 0, 1, SA, t, p_mean) -\n SA * gibbs(1, 0, 1, SA, t, p_mean))\n p = p_old - f / df_dp\n\n # After two iterations though the modified Newton-Raphson technique the\n # maximum error is 6x10^-12 dbar.\n\n # Osmotic pressure of seawater in dbar.\n return p - pw\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n"
] |
[
[
"numpy.maximum",
"numpy.sqrt",
"numpy.zeros_like",
"numpy.broadcast_arrays",
"numpy.ravel",
"numpy.logical_and"
],
[
"numpy.maximum",
"numpy.sqrt",
"numpy.ones_like",
"numpy.isnan",
"numpy.logical_or",
"numpy.zeros_like",
"numpy.ma.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
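The gsw record above leans repeatedly on the modified Newton-Raphson scheme of McDougall and Wotherspoon (2012): take a half step with the current derivative, re-evaluate the derivative at the mean of the old and half-stepped estimates, then complete the step (see t_maxdensity_exact, osmotic_pressure_t_exact and the inner loop of t_from_rho_exact). The standalone sketch below isolates that pattern; the function names and the cube-root example are illustrative assumptions, not part of the archived code, which also carries quirks of its own (t_from_rho_exact rebinds the t_freezing function to an array and later calls it).

import numpy as np

def modified_newton(f, dfdx, x0, iterations=3):
    """Approximate root of f via the McDougall-Wotherspoon accelerated Newton scheme."""
    x = np.asarray(x0, dtype=float)
    slope = dfdx(x)                        # derivative at the initial guess
    for _ in range(iterations):
        x_old = x
        fx = f(x_old)
        x = x_old - fx / slope             # half way through the iteration
        slope = dfdx(0.5 * (x + x_old))    # derivative at the mean estimate
        x = x_old - fx / slope             # complete the iteration
    return x

# Illustrative use: the cube root of 2 from a rough starting point.
root = modified_newton(lambda x: x ** 3 - 2.0, lambda x: 3.0 * x ** 2, x0=1.0)
print(float(root))   # ~1.259921, essentially converged after three iterations

A handful of iterations of this scheme reaches very small residuals when the initial guess is reasonable, which matches the fixed two- or three-iteration loops and the quoted error bounds in the archived routines.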
coryell/TensorNetwork
|
[
"9225390dc75c4a5f1d3f963608249a0c3aca826c"
] |
[
"tensornetwork/backends/shell/shell_backend.py"
] |
[
"# Copyright 2019 The TensorNetwork Authors\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport functools\r\nimport operator\r\nfrom tensornetwork.backends import base_backend\r\n#pylint: disable=line-too-long\r\nfrom typing import Optional, Sequence, Tuple, List, Any, Union, Type, Callable, Text\r\nimport numpy as np\r\n\r\n\r\nclass ShellTensor:\r\n\r\n def __init__(self, shape: Tuple[int, ...], dtype=None):\r\n self.shape = shape\r\n self.dtype = dtype\r\n\r\n def reshape(self, new_shape: Tuple[int, ...]):\r\n self.shape = new_shape\r\n return self\r\n\r\n\r\nTensor = ShellTensor\r\n\r\n\r\nclass ShellBackend(base_backend.BaseBackend):\r\n \"\"\"See base_backend.BaseBackend for documentation.\"\"\"\r\n\r\n def __init__(self):\r\n super(ShellBackend, self).__init__()\r\n self.name = \"shell\"\r\n\r\n def tensordot(self, a: Tensor, b: Tensor,\r\n axes: Sequence[Sequence[int]]) -> Tensor:\r\n # Does not work when axis < 0\r\n gen_a = (x for i, x in enumerate(a.shape) if i not in axes[0])\r\n gen_b = (x for i, x in enumerate(b.shape) if i not in axes[1])\r\n return ShellTensor(tuple(self._concat_generators(gen_a, gen_b)))\r\n\r\n def _concat_generators(self, *gen):\r\n \"\"\"Concatenates Python generators.\"\"\"\r\n for g in gen:\r\n yield from g\r\n\r\n def reshape(self, tensor: Tensor, shape: Sequence[int]) -> Tensor:\r\n tensor = tensor.reshape(tuple(shape))\r\n return tensor\r\n\r\n def transpose(self, tensor: Tensor, perm: Sequence[int]) -> Tensor:\r\n shape = tuple(tensor.shape[i] for i in perm)\r\n tensor = tensor.reshape(tuple(shape))\r\n return tensor\r\n\r\n def svd_decomposition(self,\r\n tensor: Tensor,\r\n split_axis: int,\r\n max_singular_values: Optional[int] = None,\r\n max_truncation_error: Optional[float] = None,\r\n relative: Optional[bool] = False\r\n ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\r\n if max_truncation_error is not None:\r\n raise NotImplementedError(\"SVD with truncation shape cannot be \"\r\n \"calculated without explicit tensor values.\")\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = tensor.shape[split_axis:]\r\n dim_s0 = min(\r\n functools.reduce(operator.mul, left_dims),\r\n functools.reduce(operator.mul, right_dims))\r\n if max_singular_values is not None:\r\n dim_s = min(dim_s0, max_singular_values)\r\n else:\r\n dim_s = dim_s0\r\n\r\n u = ShellTensor(left_dims + (dim_s,))\r\n vh = ShellTensor((dim_s,) + right_dims)\r\n s = ShellTensor((dim_s,))\r\n s_rest = ShellTensor((dim_s0 - dim_s,))\r\n return u, s, vh, s_rest\r\n\r\n def qr_decomposition(self, tensor: Tensor,\r\n split_axis: int) -> Tuple[Tensor, Tensor]:\r\n\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = tensor.shape[split_axis:]\r\n center_dim = min(tensor.shape)\r\n q = ShellTensor(left_dims + (center_dim,))\r\n r = ShellTensor((center_dim,) + right_dims)\r\n return q, r\r\n\r\n def rq_decomposition(self, tensor: Tensor,\r\n split_axis: int) -> Tuple[Tensor, Tensor]:\r\n\r\n left_dims = tensor.shape[:split_axis]\r\n right_dims = 
tensor.shape[split_axis:]\r\n center_dim = min(tensor.shape)\r\n q = ShellTensor(left_dims + (center_dim,))\r\n r = ShellTensor((center_dim,) + right_dims)\r\n return q, r\r\n\r\n def shape_concat(self, values: Sequence[Tensor], axis: int) -> Tensor:\r\n shape = values[0].shape\r\n if axis < 0:\r\n axis += len(shape)\r\n concat_size = sum(v.shape[axis] for v in values)\r\n new_shape = shape[:axis] + (concat_size,) + shape[axis + 1:]\r\n return ShellTensor(new_shape)\r\n\r\n def concat_shape(self, values) -> Sequence:\r\n tuple_values = (tuple(v) for v in values)\r\n return functools.reduce(operator.concat, tuple_values)\r\n\r\n def shape_tensor(self, tensor: Tensor) -> Tuple:\r\n return tensor.shape\r\n\r\n def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:\r\n return tensor.shape\r\n\r\n def shape_prod(self, values: Tensor) -> int:\r\n # This is different from the BaseBackend prod!\r\n # prod calculates the product of tensor elements and cannot implemented\r\n # for shell tensors\r\n # This returns the product of sizes instead\r\n return self.shape_product(values.shape)\r\n\r\n def shape_product(self, shape: Sequence[int]) -> int:\r\n return functools.reduce(operator.mul, shape)\r\n\r\n def sqrt(self, tensor: Tensor) -> Tensor:\r\n return tensor\r\n\r\n def diag(self, tensor: Tensor) -> Tensor:\r\n shape = tensor.shape\r\n new_tensor = ShellTensor((3 - len(shape)) * shape)\r\n return new_tensor\r\n\r\n def convert_to_tensor(self, tensor: Any) -> Tensor:\r\n shell_tensor = ShellTensor(tuple(tensor.shape))\r\n return shell_tensor\r\n\r\n def trace(self, tensor: Tensor) -> Tensor:\r\n return ShellTensor(tensor.shape[:-2])\r\n\r\n def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n return ShellTensor(tensor1.shape + tensor2.shape)\r\n\r\n def einsum(self, expression: str, *tensors: Tensor) -> Tensor:\r\n expr_list = expression.split(\",\")\r\n expr_list[-1], res = expr_list[-1].split(\"->\")\r\n shape = tuple(self._find_char(expr_list, char, tensors) for char in res)\r\n return ShellTensor(shape)\r\n\r\n def _find_char(self, expr_list: List[str], char: str,\r\n tensors: Sequence[Tensor]) -> int:\r\n \"\"\"Finds character in einsum tensor expression.\r\n\r\n Args:\r\n expr_list: List with expression for input tensors in einsum.\r\n char: One character string (letter) that corresponds to a specific\r\n einsum component.\r\n\r\n Returns:\r\n size: Size of the axis that corresponds to this einsum expression\r\n character.\r\n \"\"\"\r\n for i, expr in enumerate(expr_list):\r\n ind = expr.find(char)\r\n if ind != -1:\r\n return tensors[i].shape[ind]\r\n raise ValueError(\"Einsum output expression contains letters not given\"\r\n \"in input.\")\r\n\r\n def norm(self, tensor: Tensor) -> Tensor:\r\n return ShellTensor(())\r\n\r\n def eye(self,\r\n N: int,\r\n dtype: Optional[Type[np.number]] = None,\r\n M: Optional[int] = None) -> Tensor:\r\n if not M:\r\n M = N\r\n return ShellTensor((N, M))\r\n\r\n def ones(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None) -> Tensor:\r\n return ShellTensor(shape)\r\n\r\n def zeros(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None) -> Tensor:\r\n\r\n return ShellTensor(shape)\r\n\r\n def randn(self,\r\n shape: Tuple[int, ...],\r\n dtype: Optional[Type[np.number]] = None,\r\n seed: Optional[int] = None) -> Tensor:\r\n return ShellTensor(shape)\r\n\r\n def random_uniform(self,\r\n shape: Tuple[int, ...],\r\n boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),\r\n dtype: 
Optional[Type[np.number]] = None,\r\n seed: Optional[int] = None) -> Tensor:\r\n return ShellTensor(shape)\r\n\r\n def conj(self, tensor: Tensor) -> Tensor:\r\n return tensor\r\n\r\n def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:\r\n shape = matrix.shape\r\n return ShellTensor((shape[0],)), ShellTensor(shape)\r\n\r\n def eigs(self,\r\n A: Callable,\r\n initial_state: Optional[Tensor] = None,\r\n num_krylov_vecs: Optional[int] = 200,\r\n numeig: Optional[int] = 1,\r\n tol: Optional[float] = 1E-8,\r\n which: Optional[Text] = 'LR',\r\n maxiter: Optional[int] = None,\r\n dtype: Optional[Type] = None) -> Tuple[List, List]:\r\n\r\n if (initial_state is not None) and hasattr(A, 'shape'):\r\n if initial_state.shape != A.shape[1]:\r\n raise ValueError(\r\n \"A.shape[1]={} and initial_state.shape={} are incompatible.\".format(\r\n A.shape[1], initial_state.shape))\r\n\r\n if initial_state is None:\r\n if not hasattr(A, 'shape'):\r\n raise AttributeError(\"`A` has no attribute `shape`. Cannot initialize \"\r\n \"lanczos. Please provide a valid `initial_state`\")\r\n return [ShellTensor(tuple()) for _ in range(numeig)\r\n ], [ShellTensor((A.shape[0],)) for _ in range(numeig)]\r\n\r\n if initial_state is not None:\r\n return [ShellTensor(tuple()) for _ in range(numeig)\r\n ], [ShellTensor(initial_state.shape) for _ in range(numeig)]\r\n\r\n raise ValueError(\r\n '`A` has no attribut shape and no `initial_state` is given.')\r\n\r\n def eigsh_lanczos(self,\r\n A: Callable,\r\n initial_state: Optional[Tensor] = None,\r\n num_krylov_vecs: Optional[int] = 200,\r\n numeig: Optional[int] = 1,\r\n tol: Optional[float] = 1E-8,\r\n delta: Optional[float] = 1E-8,\r\n ndiag: Optional[int] = 20,\r\n reorthogonalize: Optional[bool] = False\r\n ) -> Tuple[List, List]:\r\n\r\n if num_krylov_vecs < numeig:\r\n raise ValueError('`num_krylov_vecs` >= `numeig` required!')\r\n\r\n if numeig > 1 and not reorthogonalize:\r\n raise ValueError(\r\n \"Got numeig = {} > 1 and `reorthogonalize = False`. \"\r\n \"Use `reorthogonalize=True` for `numeig > 1`\".format(numeig))\r\n\r\n if (initial_state is not None) and hasattr(A, 'shape'):\r\n if initial_state.shape != A.shape[1]:\r\n raise ValueError(\r\n \"A.shape[1]={} and initial_state.shape={} are incompatible.\".format(\r\n A.shape[1], initial_state.shape))\r\n\r\n if initial_state is None:\r\n if not hasattr(A, 'shape'):\r\n raise AttributeError(\"`A` has no attribute `shape`. Cannot initialize \"\r\n \"lanczos. 
Please provide a valid `initial_state`\")\r\n return [ShellTensor(tuple()) for _ in range(numeig)\r\n ], [ShellTensor(A.shape[0]) for _ in range(numeig)]\r\n\r\n if initial_state is not None:\r\n return [ShellTensor(tuple()) for _ in range(numeig)\r\n ], [ShellTensor(initial_state.shape) for _ in range(numeig)]\r\n\r\n raise ValueError(\r\n '`A` has no attribut shape adn no `initial_state` is given.')\r\n\r\n def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n raise NotImplementedError(\"Shell tensor has not implemented addition( + )\")\r\n\r\n def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n raise NotImplementedError(\"Shell tensor has not implemented subtraction( - )\")\r\n\r\n def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n a = np.ones(tensor1.shape)\r\n b = np.ones(tensor2.shape)\r\n return ShellTensor((a * b).shape)\r\n\r\n def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:\r\n raise NotImplementedError(\"Shell tensor has not implemented add( / )\")\r\n\r\n def index_update(self, tensor: Tensor, mask: Tensor,\r\n assignee: Tensor) -> Tensor:\r\n return ShellTensor(tensor.shape)\r\n\r\n def inv(self, matrix: Tensor) -> Tensor:\r\n if len(matrix.shape) > 2:\r\n raise ValueError(\r\n \"input to shell backend method `inv` has shape {}. Only matrices are supported.\"\r\n .format(matrix.shape))\r\n return ShellTensor(matrix.shape)\r\n"
] |
[
[
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
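The coryell/TensorNetwork shell backend above does shape-only bookkeeping: every operation returns a ShellTensor whose shape is derived from the input shapes, so a contraction can be shape-checked without allocating any data. The sketch below is an assumed standalone helper, not the library's API; it reproduces the tensordot rule from the record: drop the contracted axes of each operand and concatenate what remains.

from typing import Sequence, Tuple

class ShellTensorSketch:
    """Value-free tensor stand-in that only tracks a shape."""
    def __init__(self, shape: Tuple[int, ...]):
        self.shape = shape

def shell_tensordot(a: ShellTensorSketch, b: ShellTensorSketch,
                    axes: Sequence[Sequence[int]]) -> ShellTensorSketch:
    # Keep every axis of `a` and `b` that is not being contracted
    # (mirrors ShellBackend.tensordot, which assumes non-negative axes).
    kept_a = tuple(d for i, d in enumerate(a.shape) if i not in axes[0])
    kept_b = tuple(d for i, d in enumerate(b.shape) if i not in axes[1])
    return ShellTensorSketch(kept_a + kept_b)

# Contracting a (2, 3, 4) tensor with a (4, 5) tensor over the size-4 axis
# yields shape (2, 3, 5), exactly as np.tensordot would report.
out = shell_tensordot(ShellTensorSketch((2, 3, 4)),
                      ShellTensorSketch((4, 5)), axes=[[2], [0]])
assert out.shape == (2, 3, 5)

The same idea drives svd_decomposition and qr_decomposition in the record, which only need products and minima of dimension sizes rather than any numerical factorization.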
abhinavagarwalla/MAL-inference-deepsort
|
[
"3dc2010f76dc249e60d3e970247faa7e7c5ffca6"
] |
[
"setup.py"
] |
[
"from setuptools import setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\nsetup(\n name='retinanet',\n version='0.1',\n description='Fast and accurate single shot object detector',\n author = 'NVIDIA Corporation',\n author_email='[email protected]',\n packages=['retinanet', 'retinanet.backbones'],\n ext_modules=[CUDAExtension('retinanet._C',\n ['csrc/extensions.cpp', 'csrc/engine.cpp', 'csrc/cuda/decode.cu', 'csrc/cuda/nms.cu'],\n extra_compile_args={\n 'cxx': ['-std=c++14', '-O2', '-Wall'],\n 'nvcc': [\n '-std=c++14', '--expt-extended-lambda', '--use_fast_math', '-Xcompiler', '-Wall',\n '-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52',\n '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61',\n '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72',\n '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_75,code=compute_75'\n ],\n },\n library_dirs= ['/usr/local/lib/'],\n libraries=['nvinfer', 'nvinfer_plugin', 'nvonnxparser', 'opencv_core', 'opencv_highgui', 'opencv_imgproc', 'opencv_imgcodecs'])\n ],\n cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},\n install_requires=[\n 'torch>=1.0.0a0',\n #'torchvision',\n 'apex @ git+https://github.com/NVIDIA/apex',\n 'pycocotools @ git+https://github.com/nvidia/cocoapi.git#subdirectory=PythonAPI',\n 'pillow>=6.2.2',\n 'requests',\n ],\n entry_points = {'console_scripts': ['retinanet=retinanet.main:main']}\n)\n"
] |
[
[
"torch.utils.cpp_extension.CUDAExtension",
"torch.utils.cpp_extension.BuildExtension.with_options"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
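The setup.py archived above builds a mixed C++/CUDA extension that links against TensorRT and OpenCV. Below is a pared-down sketch of the same pattern; the package, source and flag names are placeholders and the TensorRT/OpenCV link libraries are omitted to keep it minimal. The key pieces are the cxx/nvcc split in extra_compile_args and the BuildExtension cmdclass, whose no_python_abi_suffix=True option names the built library _C.so rather than a platform-tagged filename.

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='example_ext',
    ext_modules=[
        CUDAExtension(
            'example_ext._C',                        # importable as example_ext._C
            ['csrc/bindings.cpp', 'csrc/kernels.cu'],  # C++ and CUDA sources together
            extra_compile_args={
                'cxx': ['-O2'],                      # flags for the host C++ compiler
                'nvcc': ['--use_fast_math'],         # flags passed only to nvcc
            },
        ),
    ],
    # no_python_abi_suffix=True drops the cpython-<ver>-<platform> tag from
    # the built shared library name, as in the archived setup.py.
    cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},
)

Such a package is normally built in place with pip install -e ., which invokes nvcc for the .cu sources through torch's build machinery.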
doublejtoh/tensorflow-resnet-image-clustering
|
[
"b81ba44910c863f14e1a0b5b3422226c1241e8a1"
] |
[
"src/main.py"
] |
[
"import tensorflow as tf\nfrom os import path as ospath\nfrom model import Model\nfrom config import LABELS_PRED, TRAINING_IMG_DIR, TRAINING_DATA_DIR, TRAINING_JSON_PATH, TEST_IMG_DIR, TEST_DATA_DIR, CHECKPOINT_PATH, CHECKPOINT_SAVE_EPOCH, CHECKPOINT_MAX_TO_KEEP, _IMAGE_WIDTH, _IMAGE_HEIGHT, _IMAGE_CHANNELS, _NUM_CLASSES, _NUM_IMAGES\n\ndef define_flags():\n tf.app.flags.DEFINE_integer('max_training_epochs', 100000,\n 'Maximum training epoch. \\n'\n 'If larger, training ends.')\n tf.app.flags.DEFINE_integer('batch_size', 16,\n 'Batch size')\n tf.app.flags.DEFINE_string('training_data_dir', TRAINING_IMG_DIR,\n 'Training data directory')\n tf.app.flags.DEFINE_string('training_json_path', TRAINING_JSON_PATH,\n 'Training data labels mapping file path')\n tf.app.flags.DEFINE_string('test_data_dir', TEST_IMG_DIR,\n 'Test data directory')\n tf.app.flags.DEFINE_string('checkpoint_path', CHECKPOINT_PATH,\n 'Save/Saved checkpoint path')\n tf.app.flags.DEFINE_integer('num_images', _NUM_IMAGES,\n 'Total number of training data images.')\n tf.app.flags.DEFINE_integer('checkpoint_save_epoch', CHECKPOINT_SAVE_EPOCH,\n 'Checkpoint save for every \"checkpoint_save_epoch\" epoch.')\n tf.app.flags.DEFINE_integer('checkpoint_max_to_keep', CHECKPOINT_MAX_TO_KEEP,\n 'Checkpoint files max to keep')\n tf.app.flags.DEFINE_integer('resnet_size', 50,\n 'resnet size selection.'\n 'must be one of [50, 101, 152]')\n tf.app.flags.DEFINE_boolean('training_predict', False,\n 'On training dataset, \\n'\n 'make labels_pred.txt (predictions) \\n')\n tf.app.flags.DEFINE_string('training_predict_output_path', TRAINING_DATA_DIR,\n 'Output path where labels_pred.txt and \\n')\n tf.app.flags.DEFINE_boolean('test_predict', False,\n 'On test dataset, \\n'\n 'make labels_pred.txt (predictions) \\n')\n tf.app.flags.DEFINE_string('test_predict_output_path', TEST_DATA_DIR,\n 'Output path where labels_pred.txt and \\n')\n\ndef main():\n FLAGS = tf.app.flags.FLAGS\n resnet_model = Model(\n resnet_size=FLAGS.resnet_size,\n initial_kernel_size=7,\n initial_kernel_stride=2,\n kernel_strides=[1, 2, 2, 2],\n initial_pool_size=3,\n initial_pool_stride=2,\n initial_filters=64,\n input_width=_IMAGE_WIDTH,\n input_height=_IMAGE_HEIGHT,\n input_channels=_IMAGE_CHANNELS,\n num_classes=_NUM_CLASSES,\n data_format='channels_last'\n )\n if FLAGS.training_predict:\n resnet_model.predict(\n flags=FLAGS,\n data_dir=FLAGS.training_data_dir,\n pred_out_path=ospath.join(FLAGS.training_predict_output_path, LABELS_PRED)\n )\n elif FLAGS.test_predict:\n resnet_model.predict(\n flags=FLAGS,\n data_dir=FLAGS.test_data_dir,\n pred_out_path=ospath.join(FLAGS.test_predict_output_path, LABELS_PRED)\n )\n else:\n resnet_model.train(FLAGS)\n\n\nif __name__ == '__main__':\n define_flags()\n main()\n"
] |
[
[
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_integer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
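The src/main.py record above uses the TF1-era tf.app.flags interface: every option is declared once with a DEFINE_* call, and the parsed values are read back through the shared FLAGS object. A minimal sketch of that pattern follows; the flag names and defaults here are illustrative rather than the project's, and it assumes a TensorFlow 1.x release in which tf.app is still available.

import tensorflow as tf   # assumes a 1.x release, matching the record's style

tf.app.flags.DEFINE_integer('batch_size', 16, 'Mini-batch size.')
tf.app.flags.DEFINE_string('data_dir', '/tmp/data', 'Input data directory.')

FLAGS = tf.app.flags.FLAGS

def main(_):
    # tf.app.run() parses the command line before invoking main(), so the
    # FLAGS attributes are already populated here.
    print('training with batch_size=%d from %s' % (FLAGS.batch_size,
                                                   FLAGS.data_dir))

if __name__ == '__main__':
    tf.app.run()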
Mottl/pandas
|
[
"d7af297d6fd70be6b1b0c03771127b9aedcef84b",
"6111f645c5adc5bdcd3810b4112392bda3583d59"
] |
[
"pandas/core/groupby/groupby.py",
"pandas/tests/arrays/categorical/test_constructors.py"
] |
[
"\"\"\"\nProvide the groupby split-apply-combine paradigm. Define the GroupBy\nclass providing the base-class of operations.\n\nThe SeriesGroupBy and DataFrameGroupBy sub-class\n(defined in pandas.core.groupby.generic)\nexpose these user-facing objects to provide specific functionailty.\n\"\"\"\n\nimport collections\nfrom contextlib import contextmanager\nimport datetime\nfrom functools import partial, wraps\nimport types\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import Timestamp, groupby as libgroupby\nimport pandas.compat as compat\nfrom pandas.compat import callable, range, set_function_name, zip\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._validators import validate_kwargs\n\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\nfrom pandas.core.dtypes.common import (\n ensure_float, is_extension_array_dtype, is_numeric_dtype, is_scalar)\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas.core.algorithms as algorithms\nfrom pandas.core.base import (\n DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError)\nimport pandas.core.common as com\nfrom pandas.core.config import option_context\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.groupby import base\nfrom pandas.core.index import Index, MultiIndex\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index_sorter\n\n_common_see_also = \"\"\"\n See Also\n --------\n pandas.Series.%(name)s\n pandas.DataFrame.%(name)s\n pandas.Panel.%(name)s\n\"\"\"\n\n_apply_docs = dict(\n template=\"\"\"\n Apply function `func` group-wise and combine the results together.\n\n The function passed to `apply` must take a {input} as its first\n argument and return a DataFrame, Series or scalar. `apply` will\n then take care of combining the results back together into a single\n dataframe or series. `apply` is therefore a highly flexible\n grouping method.\n\n While `apply` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods\n like `agg` or `transform`. Pandas offers a wide range of method that will\n be much faster than using `apply` for their specific purposes, so try to\n use them before reaching for `apply`.\n\n Parameters\n ----------\n func : callable\n A callable that takes a {input} as its first argument, and\n returns a dataframe, a series or a scalar. In addition the\n callable may take positional and keyword arguments.\n args, kwargs : tuple and dict\n Optional positional and keyword arguments to pass to `func`.\n\n Returns\n -------\n applied : Series or DataFrame\n\n See Also\n --------\n pipe : Apply function to the full GroupBy object instead of to each\n group.\n aggregate : Apply aggregate function to the GroupBy object.\n transform : Apply function column-by-column to the GroupBy object.\n Series.apply : Apply a function to a Series.\n DataFrame.apply : Apply a function to each row or column of a DataFrame.\n \"\"\",\n dataframe_examples=\"\"\"\n >>> df = pd.DataFrame({'A': 'a a b'.split(),\n 'B': [1,2,3],\n 'C': [4,6, 5]})\n >>> g = df.groupby('A')\n\n Notice that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: below the function passed to `apply` takes a DataFrame as\n its argument and returns a DataFrame. 
`apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x / x.sum())\n B C\n 0 0.333333 0.4\n 1 0.666667 0.6\n 2 1.000000 1.0\n\n Example 2: The function passed to `apply` takes a DataFrame as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x.max() - x.min())\n B C\n A\n a 1 2\n b 0 0\n\n Example 3: The function passed to `apply` takes a DataFrame as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.C.max() - x.B.min())\n A\n a 5\n b 2\n dtype: int64\n \"\"\",\n series_examples=\"\"\"\n >>> s = pd.Series([0, 1, 2], index='a a b'.split())\n >>> g = s.groupby(s.index)\n\n From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: The function passed to `apply` takes a Series as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new Series:\n\n >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)\n 0 0.0\n 1 0.5\n 2 4.0\n dtype: float64\n\n Example 2: The function passed to `apply` takes a Series as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.max() - x.min())\n a 1\n b 0\n dtype: int64\n\n Notes\n -----\n In the current implementation `apply` calls `func` twice on the\n first group to decide whether it can take a fast or slow code\n path. This can lead to unexpected behavior if `func` has\n side-effects, as they will take effect twice for the first\n group.\n\n Examples\n --------\n {examples}\n \"\"\")\n\n_pipe_template = \"\"\"\\\nApply a function `func` with arguments to this %(klass)s object and return\nthe function's result.\n\n%(versionadded)s\n\nUse `.pipe` when you want to improve readability by chaining together\nfunctions that expect Series, DataFrames, GroupBy or Resampler objects.\nInstead of writing\n\n>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)\n\nYou can write\n\n>>> (df.groupby('group')\n... .pipe(f)\n... .pipe(g, arg1=a)\n... 
.pipe(h, arg2=b, arg3=c))\n\nwhich is much more readable.\n\nParameters\n----------\nfunc : callable or tuple of (callable, string)\n Function to apply to this %(klass)s object or, alternatively,\n a `(callable, data_keyword)` tuple where `data_keyword` is a\n string indicating the keyword of `callable` that expects the\n %(klass)s object.\nargs : iterable, optional\n positional arguments passed into `func`.\nkwargs : dict, optional\n a dictionary of keyword arguments passed into `func`.\n\nReturns\n-------\nobject : the return type of `func`.\n\nSee Also\n--------\npandas.Series.pipe : Apply a function with arguments to a series.\npandas.DataFrame.pipe: Apply a function with arguments to a dataframe.\napply : Apply function to each group instead of to the\n full %(klass)s object.\n\nNotes\n-----\nSee more `here\n<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_\n\nExamples\n--------\n%(examples)s\n\"\"\"\n\n_transform_template = \"\"\"\nCall function producing a like-indexed %(klass)s on each group and\nreturn a %(klass)s having the same indexes as the original object\nfilled with the transformed values\n\nParameters\n----------\nf : function\n Function to apply to each group\n\nReturns\n-------\n%(klass)s\n\nSee Also\n--------\naggregate, transform\n\nNotes\n-----\nEach group is endowed the attribute 'name' in case you need to know\nwhich group you are working on.\n\nThe current implementation imposes three requirements on f:\n\n* f must return a value that either has the same shape as the input\n subframe or can be broadcast to the shape of the input subframe.\n For example, f returns a scalar it will be broadcast to have the\n same shape as the input subframe.\n* if this is a DataFrame, f must support application column-by-column\n in the subframe. If f also supports application to the entire subframe,\n then a fast path is used starting from the second chunk.\n* f must not mutate groups. Mutation is not supported and may\n produce unexpected results.\n\nExamples\n--------\n\n# Same shape\n>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n... 'foo', 'bar'],\n... 'B' : ['one', 'one', 'two', 'three',\n... 'two', 'two'],\n... 'C' : [1, 5, 5, 2, 5, 5],\n... 
'D' : [2.0, 5., 8., 1., 2., 9.]})\n>>> grouped = df.groupby('A')\n>>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n0 -1.154701 -0.577350\n1 0.577350 0.000000\n2 0.577350 1.154701\n3 -1.154701 -1.000000\n4 0.577350 -0.577350\n5 0.577350 1.000000\n\n# Broadcastable\n>>> grouped.transform(lambda x: x.max() - x.min())\n C D\n0 4 6.0\n1 3 8.0\n2 4 6.0\n3 3 8.0\n4 4 6.0\n5 3 8.0\n\"\"\"\n\n\nclass GroupByPlot(PandasObject):\n \"\"\"\n Class implementing the .plot attribute for groupby objects.\n \"\"\"\n\n def __init__(self, groupby):\n self._groupby = groupby\n\n def __call__(self, *args, **kwargs):\n def f(self):\n return self.plot(*args, **kwargs)\n f.__name__ = 'plot'\n return self._groupby.apply(f)\n\n def __getattr__(self, name):\n def attr(*args, **kwargs):\n def f(self):\n return getattr(self.plot, name)(*args, **kwargs)\n return self._groupby.apply(f)\n return attr\n\n\n@contextmanager\ndef _group_selection_context(groupby):\n \"\"\"\n Set / reset the _group_selection_context.\n \"\"\"\n groupby._set_group_selection()\n yield groupby\n groupby._reset_group_selection()\n\n\nclass _GroupBy(PandasObject, SelectionMixin):\n _group_selection = None\n _apply_whitelist = frozenset()\n\n def __init__(self, obj, keys=None, axis=0, level=None,\n grouper=None, exclusions=None, selection=None, as_index=True,\n sort=True, group_keys=True, squeeze=False,\n observed=False, **kwargs):\n\n self._selection = selection\n\n if isinstance(obj, NDFrame):\n obj._consolidate_inplace()\n\n self.level = level\n\n if not as_index:\n if not isinstance(obj, DataFrame):\n raise TypeError('as_index=False only valid with DataFrame')\n if axis != 0:\n raise ValueError('as_index=False only valid for axis=0')\n\n self.as_index = as_index\n self.keys = keys\n self.sort = sort\n self.group_keys = group_keys\n self.squeeze = squeeze\n self.observed = observed\n self.mutated = kwargs.pop('mutated', False)\n\n if grouper is None:\n from pandas.core.groupby.grouper import _get_grouper\n grouper, exclusions, obj = _get_grouper(obj, keys,\n axis=axis,\n level=level,\n sort=sort,\n observed=observed,\n mutated=self.mutated)\n\n self.obj = obj\n self.axis = obj._get_axis_number(axis)\n self.grouper = grouper\n self.exclusions = set(exclusions) if exclusions else set()\n\n # we accept no other args\n validate_kwargs('group', kwargs, {})\n\n def __len__(self):\n return len(self.groups)\n\n def __unicode__(self):\n # TODO: Better unicode/repr for GroupBy object\n return object.__repr__(self)\n\n def _assure_grouper(self):\n \"\"\"\n We create the grouper on instantiation sub-classes may have a\n different policy.\n \"\"\"\n pass\n\n @property\n def groups(self):\n \"\"\"\n Dict {group name -> group labels}.\n \"\"\"\n self._assure_grouper()\n return self.grouper.groups\n\n @property\n def ngroups(self):\n self._assure_grouper()\n return self.grouper.ngroups\n\n @property\n def indices(self):\n \"\"\"\n Dict {group name -> group indices}.\n \"\"\"\n self._assure_grouper()\n return self.grouper.indices\n\n def _get_indices(self, names):\n \"\"\"\n Safe get multiple indices, translate keys for\n datelike to underlying repr.\n \"\"\"\n\n def get_converter(s):\n # possibly convert to the actual key types\n # in the indices, could be a Timestamp or a np.datetime64\n if isinstance(s, (Timestamp, datetime.datetime)):\n return lambda key: Timestamp(key)\n elif isinstance(s, np.datetime64):\n return lambda key: Timestamp(key).asm8\n else:\n return lambda key: key\n\n if len(names) == 0:\n return []\n\n if len(self.indices) > 
0:\n index_sample = next(iter(self.indices))\n else:\n index_sample = None # Dummy sample\n\n name_sample = names[0]\n if isinstance(index_sample, tuple):\n if not isinstance(name_sample, tuple):\n msg = (\"must supply a tuple to get_group with multiple\"\n \" grouping keys\")\n raise ValueError(msg)\n if not len(name_sample) == len(index_sample):\n try:\n # If the original grouper was a tuple\n return [self.indices[name] for name in names]\n except KeyError:\n # turns out it wasn't a tuple\n msg = (\"must supply a a same-length tuple to get_group\"\n \" with multiple grouping keys\")\n raise ValueError(msg)\n\n converters = [get_converter(s) for s in index_sample]\n names = [tuple(f(n) for f, n in zip(converters, name))\n for name in names]\n\n else:\n converter = get_converter(index_sample)\n names = [converter(name) for name in names]\n\n return [self.indices.get(name, []) for name in names]\n\n def _get_index(self, name):\n \"\"\"\n Safe get index, translate keys for datelike to underlying repr.\n \"\"\"\n return self._get_indices([name])[0]\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, Series):\n if self._group_selection is not None:\n return self.obj[self._group_selection]\n return self.obj\n else:\n return self.obj[self._selection]\n\n def _reset_group_selection(self):\n \"\"\"\n Clear group based selection.\n\n Used for methods needing to return info on each group regardless of\n whether a group selection was previously set.\n \"\"\"\n if self._group_selection is not None:\n # GH12839 clear cached selection too when changing group selection\n self._group_selection = None\n self._reset_cache('_selected_obj')\n\n def _set_group_selection(self):\n \"\"\"\n Create group based selection.\n\n Used when selection is not passed directly but instead via a grouper.\n\n NOTE: this should be paired with a call to _reset_group_selection\n \"\"\"\n grp = self.grouper\n if not (self.as_index and\n getattr(grp, 'groupings', None) is not None and\n self.obj.ndim > 1 and\n self._group_selection is None):\n return\n\n ax = self.obj._info_axis\n groupers = [g.name for g in grp.groupings\n if g.level is None and g.in_axis]\n\n if len(groupers):\n # GH12839 clear selected obj cache when group selection changes\n self._group_selection = ax.difference(Index(groupers),\n sort=False).tolist()\n self._reset_cache('_selected_obj')\n\n def _set_result_index_ordered(self, result):\n # set the result index on the passed values object and\n # return the new object, xref 8046\n\n # the values/counts are repeated according to the group index\n # shortcut if we have an already ordered grouper\n if not self.grouper.is_monotonic:\n index = Index(np.concatenate(\n self._get_indices(self.grouper.result_index)))\n result.set_axis(index, axis=self.axis, inplace=True)\n result = result.sort_index(axis=self.axis)\n\n result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,\n inplace=True)\n return result\n\n def _dir_additions(self):\n return self.obj._dir_additions() | self._apply_whitelist\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n if hasattr(self.obj, attr):\n return self._make_wrapper(attr)\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n @Substitution(klass='GroupBy',\n versionadded='.. 
versionadded:: 0.21.0',\n examples=\"\"\"\\\n>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})\n>>> df\n A B\n0 a 1\n1 b 2\n2 a 3\n3 b 4\n\nTo get the difference between each groups maximum and minimum value in one\npass, you can do\n\n>>> df.groupby('A').pipe(lambda x: x.max() - x.min())\n B\nA\na 2\nb 2\"\"\")\n @Appender(_pipe_template)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n plot = property(GroupByPlot)\n\n def _make_wrapper(self, name):\n if name not in self._apply_whitelist:\n is_callable = callable(getattr(self._selected_obj, name, None))\n kind = ' callable ' if is_callable else ' '\n msg = (\"Cannot access{0}attribute {1!r} of {2!r} objects, try \"\n \"using the 'apply' method\".format(kind, name,\n type(self).__name__))\n raise AttributeError(msg)\n\n self._set_group_selection()\n\n # need to setup the selection\n # as are not passed directly but in the grouper\n f = getattr(self._selected_obj, name)\n if not isinstance(f, types.MethodType):\n return self.apply(lambda self: getattr(self, name))\n\n f = getattr(type(self._selected_obj), name)\n\n def wrapper(*args, **kwargs):\n # a little trickery for aggregation functions that need an axis\n # argument\n kwargs_with_axis = kwargs.copy()\n if ('axis' not in kwargs_with_axis or\n kwargs_with_axis['axis'] is None):\n kwargs_with_axis['axis'] = self.axis\n\n def curried_with_axis(x):\n return f(x, *args, **kwargs_with_axis)\n\n def curried(x):\n return f(x, *args, **kwargs)\n\n # preserve the name so we can detect it when calling plot methods,\n # to avoid duplicates\n curried.__name__ = curried_with_axis.__name__ = name\n\n # special case otherwise extra plots are created when catching the\n # exception below\n if name in base.plotting_methods:\n return self.apply(curried)\n\n try:\n return self.apply(curried_with_axis)\n except Exception:\n try:\n return self.apply(curried)\n except Exception:\n\n # related to : GH3688\n # try item-by-item\n # this can be called recursively, so need to raise\n # ValueError\n # if we don't have this method to indicated to aggregate to\n # mark this column as an error\n try:\n return self._aggregate_item_by_item(name,\n *args, **kwargs)\n except (AttributeError):\n raise ValueError\n\n return wrapper\n\n def get_group(self, name, obj=None):\n \"\"\"\n Constructs NDFrame from group with provided name.\n\n Parameters\n ----------\n name : object\n the name of the group to get as a DataFrame\n obj : NDFrame, default None\n the NDFrame to take the DataFrame out of. If\n it is None, the object groupby was called on will\n be used\n\n Returns\n -------\n group : same type as obj\n \"\"\"\n if obj is None:\n obj = self._selected_obj\n\n inds = self._get_index(name)\n if not len(inds):\n raise KeyError(name)\n\n return obj._take(inds, axis=self.axis)\n\n def __iter__(self):\n \"\"\"\n Groupby iterator.\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n return self.grouper.get_iterator(self.obj, axis=self.axis)\n\n @Appender(_apply_docs['template']\n .format(input=\"dataframe\",\n examples=_apply_docs['dataframe_examples']))\n def apply(self, func, *args, **kwargs):\n\n func = self._is_builtin_func(func)\n\n # this is needed so we don't try and wrap strings. 
If we could\n # resolve functions to their callable functions prior, this\n # wouldn't be needed\n if args or kwargs:\n if callable(func):\n\n @wraps(func)\n def f(g):\n with np.errstate(all='ignore'):\n return func(g, *args, **kwargs)\n else:\n raise ValueError('func must be a callable if args or '\n 'kwargs are supplied')\n else:\n f = func\n\n # ignore SettingWithCopy here in case the user mutates\n with option_context('mode.chained_assignment', None):\n try:\n result = self._python_apply_general(f)\n except Exception:\n\n # gh-20949\n # try again, with .apply acting as a filtering\n # operation, by excluding the grouping column\n # This would normally not be triggered\n # except if the udf is trying an operation that\n # fails on *some* columns, e.g. a numeric operation\n # on a string grouper column\n\n with _group_selection_context(self):\n return self._python_apply_general(f)\n\n return result\n\n def _python_apply_general(self, f):\n keys, values, mutated = self.grouper.apply(f, self._selected_obj,\n self.axis)\n\n return self._wrap_applied_output(\n keys,\n values,\n not_indexed_same=mutated or self.mutated)\n\n def _iterate_slices(self):\n yield self._selection_name, self._selected_obj\n\n def transform(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n def _cumcount_array(self, ascending=True):\n \"\"\"\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Notes\n -----\n this is currently implementing sort=False\n (though the default is sort=True) for groupby in general\n \"\"\"\n ids, _, ngroups = self.grouper.group_info\n sorter = get_group_index_sorter(ids, ngroups)\n ids, count = ids[sorter], len(ids)\n\n if count == 0:\n return np.empty(0, dtype=np.int64)\n\n run = np.r_[True, ids[:-1] != ids[1:]]\n rep = np.diff(np.r_[np.nonzero(run)[0], count])\n out = (~run).cumsum()\n\n if ascending:\n out -= np.repeat(out[run], rep)\n else:\n out = np.repeat(out[np.r_[run[1:], True]], rep) - out\n\n rev = np.empty(count, dtype=np.intp)\n rev[sorter] = np.arange(count, dtype=np.intp)\n return out[rev].astype(np.int64, copy=False)\n\n def _try_cast(self, result, obj, numeric_only=False):\n \"\"\"\n Try to cast the result to our obj original type,\n we may have roundtripped through object in the mean-time.\n\n If numeric_only is True, then only try to cast numerics\n and not datetimelikes.\n\n \"\"\"\n if obj.ndim > 1:\n dtype = obj.values.dtype\n else:\n dtype = obj.dtype\n\n if not is_scalar(result):\n if is_extension_array_dtype(dtype):\n # The function can return something of any type, so check\n # if the type is compatible with the calling EA.\n try:\n result = obj.values._from_sequence(result)\n except Exception:\n # https://github.com/pandas-dev/pandas/issues/22850\n # pandas has no control over what 3rd-party ExtensionArrays\n # do in _values_from_sequence. 
We still want ops to work\n # though, so we catch any regular Exception.\n pass\n elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n def _transform_should_cast(self, func_nm):\n \"\"\"\n Parameters:\n -----------\n func_nm: str\n The name of the aggregation function being performed\n\n Returns:\n --------\n bool\n Whether transform should attempt to cast the result of aggregation\n \"\"\"\n return (self.size().fillna(0) > 0).any() and (\n func_nm not in base.cython_cast_blacklist)\n\n def _cython_transform(self, how, numeric_only=True, **kwargs):\n output = collections.OrderedDict()\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.transform(obj.values, how,\n **kwargs)\n except NotImplementedError:\n continue\n except AssertionError as e:\n raise GroupByError(str(e))\n if self._transform_should_cast(how):\n output[name] = self._try_cast(result, obj)\n else:\n output[name] = result\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_transformed_output(output, names)\n\n def _cython_agg_general(self, how, alt=None, numeric_only=True,\n min_count=-1):\n output = {}\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.aggregate(obj.values, how,\n min_count=min_count)\n except AssertionError as e:\n raise GroupByError(str(e))\n output[name] = self._try_cast(result, obj)\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_aggregated_output(output, names)\n\n def _python_agg_general(self, func, *args, **kwargs):\n func = self._is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output = {}\n for name, obj in self._iterate_slices():\n try:\n result, counts = self.grouper.agg_series(obj, f)\n output[name] = self._try_cast(result, obj, numeric_only=True)\n except TypeError:\n continue\n\n if len(output) == 0:\n return self._python_apply_general(f)\n\n if self.grouper._filter_empty_groups:\n\n mask = counts.ravel() > 0\n for name, result in compat.iteritems(output):\n\n # since we are masking, make sure that we have a float object\n values = result\n if is_numeric_dtype(values.dtype):\n values = ensure_float(values)\n\n output[name] = self._try_cast(values[mask], result)\n\n return self._wrap_aggregated_output(output)\n\n def _wrap_applied_output(self, *args, **kwargs):\n raise AbstractMethodError(self)\n\n def _concat_objects(self, keys, values, not_indexed_same=False):\n from pandas.core.reshape.concat import concat\n\n def reset_identity(values):\n # reset the identities of the components\n # of the values to prevent aliasing\n for v in com._not_none(*values):\n ax = v._get_axis(self.axis)\n ax._reset_identity()\n return values\n\n if not not_indexed_same:\n result = concat(values, axis=self.axis)\n ax = self._selected_obj._get_axis(self.axis)\n\n if isinstance(result, Series):\n result = result.reindex(ax)\n else:\n\n # this is a very unfortunate situation\n # we have a multi-index that is NOT lexsorted\n # and we have a result which is duplicated\n # we can't reindex, so we resort to this\n # GH 14776\n if isinstance(ax, MultiIndex) and not ax.is_unique:\n indexer = algorithms.unique1d(\n 
result.index.get_indexer_for(ax.values))\n result = result.take(indexer, axis=self.axis)\n else:\n result = result.reindex(ax, axis=self.axis)\n\n elif self.group_keys:\n\n values = reset_identity(values)\n if self.as_index:\n\n # possible MI return case\n group_keys = keys\n group_levels = self.grouper.levels\n group_names = self.grouper.names\n\n result = concat(values, axis=self.axis, keys=group_keys,\n levels=group_levels, names=group_names,\n sort=False)\n else:\n\n # GH5610, returns a MI, with the first level being a\n # range index\n keys = list(range(len(values)))\n result = concat(values, axis=self.axis, keys=keys)\n else:\n values = reset_identity(values)\n result = concat(values, axis=self.axis)\n\n if (isinstance(result, Series) and\n getattr(self, '_selection_name', None) is not None):\n\n result.name = self._selection_name\n\n return result\n\n def _apply_filter(self, indices, dropna):\n if len(indices) == 0:\n indices = np.array([], dtype='int64')\n else:\n indices = np.sort(np.concatenate(indices))\n if dropna:\n filtered = self._selected_obj.take(indices, axis=self.axis)\n else:\n mask = np.empty(len(self._selected_obj.index), dtype=bool)\n mask.fill(False)\n mask[indices.astype(int)] = True\n # mask fails to broadcast when passed to where; broadcast manually.\n mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T\n filtered = self._selected_obj.where(mask) # Fill with NaNs.\n return filtered\n\n\nclass GroupBy(_GroupBy):\n\n \"\"\"\n Class for grouping and aggregating relational data.\n\n See aggregate, transform, and apply functions on this object.\n\n It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:\n\n ::\n\n grouped = groupby(obj, ...)\n\n Parameters\n ----------\n obj : pandas object\n axis : int, default 0\n level : int, default None\n Level of MultiIndex\n groupings : list of Grouping objects\n Most users should ignore this\n exclusions : array-like, optional\n List of columns to exclude\n name : string\n Most users should ignore this\n\n Returns\n -------\n **Attributes**\n groups : dict\n {group name -> group labels}\n len(grouped) : int\n Number of groups\n\n Notes\n -----\n After grouping, see aggregate, apply, and transform functions. Here are\n some other brief notes about usage. When grouping by multiple groups, the\n result index will be a MultiIndex (hierarchical) by default.\n\n Iteration produces (key, group) tuples, i.e. chunking the data by group. So\n you can write code like:\n\n ::\n\n grouped = obj.groupby(keys, axis=axis)\n for key, group in grouped:\n # do something with the data\n\n Function calls on GroupBy, if not specially implemented, \"dispatch\" to the\n grouped data. 
So if you group a DataFrame and wish to invoke the std()\n method on each group, you can simply do:\n\n ::\n\n df.groupby(mapper).std()\n\n rather than\n\n ::\n\n df.groupby(mapper).aggregate(np.std)\n\n You can pass arguments to these \"wrapped\" functions, too.\n\n See the online documentation for full exposition on these topics and much\n more\n \"\"\"\n def _bool_agg(self, val_test, skipna):\n \"\"\"\n Shared func to call any / all Cython GroupBy implementations.\n \"\"\"\n\n def objs_to_bool(vals):\n try:\n vals = vals.astype(np.bool)\n except ValueError: # for objects\n vals = np.array([bool(x) for x in vals])\n\n return vals.view(np.uint8)\n\n def result_to_bool(result):\n return result.astype(np.bool, copy=False)\n\n return self._get_cythonized_result('group_any_all', self.grouper,\n aggregate=True,\n cython_dtype=np.uint8,\n needs_values=True,\n needs_mask=True,\n pre_processing=objs_to_bool,\n post_processing=result_to_bool,\n val_test=val_test, skipna=skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def any(self, skipna=True):\n \"\"\"\n Returns True if any value in the group is truthful, else False.\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('any', skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def all(self, skipna=True):\n \"\"\"\n Returns True if all values in the group are truthful, else False.\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('all', skipna)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def count(self):\n \"\"\"\n Compute count of group, excluding missing values.\n \"\"\"\n\n # defined here for API doc\n raise NotImplementedError\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def mean(self, *args, **kwargs):\n \"\"\"\n Compute mean of groups, excluding missing values.\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n\n %(see_also)s\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5],\n ... 
'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])\n\n Groupby one column and return the mean of the remaining columns in\n each group.\n\n >>> df.groupby('A').mean()\n >>>\n B C\n A\n 1 3.0 1.333333\n 2 4.0 1.500000\n\n Groupby two columns and return the mean of the remaining column.\n\n >>> df.groupby(['A', 'B']).mean()\n >>>\n C\n A B\n 1 2.0 2\n 4.0 1\n 2 3.0 1\n 5.0 2\n\n Groupby one column and return the mean of only particular column in\n the group.\n\n >>> df.groupby('A')['B'].mean()\n >>>\n A\n 1 3.0\n 2 4.0\n Name: B, dtype: float64\n \"\"\"\n nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])\n try:\n return self._cython_agg_general('mean', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n with _group_selection_context(self):\n f = lambda x: x.mean(axis=self.axis, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def median(self, **kwargs):\n \"\"\"\n Compute median of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n try:\n return self._cython_agg_general('median', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n\n def f(x):\n if isinstance(x, np.ndarray):\n x = Series(x)\n return x.median(axis=self.axis, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n # TODO: implement at Cython level?\n nv.validate_groupby_func('std', args, kwargs)\n return np.sqrt(self.var(ddof=ddof, **kwargs))\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_groupby_func('var', args, kwargs)\n if ddof == 1:\n try:\n return self._cython_agg_general('var', **kwargs)\n except Exception:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n else:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def sem(self, ddof=1):\n \"\"\"\n Compute standard error of the mean of groups, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex.\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n return self.std(ddof=ddof) / np.sqrt(self.count())\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def size(self):\n \"\"\"\n Compute group sizes.\n \"\"\"\n result = self.grouper.size()\n\n if isinstance(self.obj, Series):\n result.name = getattr(self.obj, 'name', None)\n return result\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add numeric operations to the GroupBy generically.\n \"\"\"\n\n def groupby_function(name, alias, npfunc,\n numeric_only=True, _convert=False,\n min_count=-1):\n\n _local_template = \"Compute %(f)s of group values\"\n\n @Substitution(name='groupby', 
f=name)\n @Appender(_common_see_also)\n @Appender(_local_template)\n def f(self, **kwargs):\n if 'numeric_only' not in kwargs:\n kwargs['numeric_only'] = numeric_only\n if 'min_count' not in kwargs:\n kwargs['min_count'] = min_count\n\n self._set_group_selection()\n try:\n return self._cython_agg_general(\n alias, alt=npfunc, **kwargs)\n except AssertionError as e:\n raise SpecificationError(str(e))\n except Exception:\n result = self.aggregate(\n lambda x: npfunc(x, axis=self.axis))\n if _convert:\n result = result._convert(datetime=True)\n return result\n\n set_function_name(f, name, cls)\n\n return f\n\n def first_compat(x, axis=0):\n\n def first(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[0]\n\n if isinstance(x, DataFrame):\n return x.apply(first, axis=axis)\n else:\n return first(x)\n\n def last_compat(x, axis=0):\n\n def last(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[-1]\n\n if isinstance(x, DataFrame):\n return x.apply(last, axis=axis)\n else:\n return last(x)\n\n cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)\n cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)\n cls.min = groupby_function('min', 'min', np.min, numeric_only=False)\n cls.max = groupby_function('max', 'max', np.max, numeric_only=False)\n cls.first = groupby_function('first', 'first', first_compat,\n numeric_only=False)\n cls.last = groupby_function('last', 'last', last_compat,\n numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def ohlc(self):\n \"\"\"\n Compute sum of values, excluding missing values.\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n\n return self._apply_to_column_groupbys(\n lambda x: x._cython_agg_general('ohlc'))\n\n @Appender(DataFrame.describe.__doc__)\n def describe(self, **kwargs):\n with _group_selection_context(self):\n result = self.apply(lambda x: x.describe(**kwargs))\n if self.axis == 1:\n return result.T\n return result.unstack()\n\n def resample(self, rule, *args, **kwargs):\n \"\"\"\n Provide resampling when using a TimeGrouper.\n\n Given a grouper, the function resamples it according to a string\n \"string\" -> \"frequency\".\n\n See the :ref:`frequency aliases <timeseries.offset-aliases>`\n documentation for more details.\n\n Parameters\n ----------\n rule : str or DateOffset\n The offset string or object representing target grouper conversion.\n *args, **kwargs\n Possible arguments are `how`, `fill_method`, `limit`, `kind` and\n `on`, and other arguments of `TimeGrouper`.\n\n Returns\n -------\n Grouper\n Return a new grouper with our resampler appended.\n\n See Also\n --------\n pandas.Grouper : Specify a frequency to resample with when\n grouping by a key.\n DatetimeIndex.resample : Frequency conversion and resampling of\n time series.\n\n Examples\n --------\n >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> df = pd.DataFrame(data=4 * [range(2)],\n ... index=idx,\n ... 
columns=['a', 'b'])\n >>> df.iloc[2, 0] = 5\n >>> df\n a b\n 2000-01-01 00:00:00 0 1\n 2000-01-01 00:01:00 0 1\n 2000-01-01 00:02:00 5 1\n 2000-01-01 00:03:00 0 1\n\n Downsample the DataFrame into 3 minute bins and sum the values of\n the timestamps falling into a bin.\n\n >>> df.groupby('a').resample('3T').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 2\n 2000-01-01 00:03:00 0 1\n 5 2000-01-01 00:00:00 5 1\n\n Upsample the series into 30 second bins.\n\n >>> df.groupby('a').resample('30S').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 1\n 2000-01-01 00:00:30 0 0\n 2000-01-01 00:01:00 0 1\n 2000-01-01 00:01:30 0 0\n 2000-01-01 00:02:00 0 0\n 2000-01-01 00:02:30 0 0\n 2000-01-01 00:03:00 0 1\n 5 2000-01-01 00:02:00 5 1\n\n Resample by month. Values are assigned to the month of the period.\n\n >>> df.groupby('a').resample('M').sum()\n a b\n a\n 0 2000-01-31 0 3\n 5 2000-01-31 5 1\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> df.groupby('a').resample('3T', closed='right').sum()\n a b\n a\n 0 1999-12-31 23:57:00 0 1\n 2000-01-01 00:00:00 0 2\n 5 2000-01-01 00:00:00 5 1\n\n Downsample the series into 3 minute bins and close the right side of\n the bin interval, but label each bin using the right edge instead of\n the left.\n\n >>> df.groupby('a').resample('3T', closed='right', label='right').sum()\n a b\n a\n 0 2000-01-01 00:00:00 0 1\n 2000-01-01 00:03:00 0 2\n 5 2000-01-01 00:03:00 5 1\n\n Add an offset of twenty seconds.\n\n >>> df.groupby('a').resample('3T', loffset='20s').sum()\n a b\n a\n 0 2000-01-01 00:00:20 0 2\n 2000-01-01 00:03:20 0 1\n 5 2000-01-01 00:00:20 5 1\n \"\"\"\n from pandas.core.resample import get_resampler_for_grouping\n return get_resampler_for_grouping(self, rule, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def rolling(self, *args, **kwargs):\n \"\"\"\n Return a rolling grouper, providing rolling functionality per group.\n \"\"\"\n from pandas.core.window import RollingGroupby\n return RollingGroupby(self, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def expanding(self, *args, **kwargs):\n \"\"\"\n Return an expanding grouper, providing expanding\n functionality per group.\n \"\"\"\n from pandas.core.window import ExpandingGroupby\n return ExpandingGroupby(self, *args, **kwargs)\n\n def _fill(self, direction, limit=None):\n \"\"\"\n Shared function for `pad` and `backfill` to call Cython method.\n\n Parameters\n ----------\n direction : {'ffill', 'bfill'}\n Direction passed to underlying Cython function. `bfill` will cause\n values to be filled backwards. `ffill` and any other values will\n default to a forward fill\n limit : int, default None\n Maximum number of consecutive values to fill. 
If `None`, this\n method will convert to -1 prior to passing to Cython\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n\n See Also\n --------\n pad\n backfill\n \"\"\"\n # Need int value for Cython\n if limit is None:\n limit = -1\n\n return self._get_cythonized_result('group_fillna_indexer',\n self.grouper, needs_mask=True,\n cython_dtype=np.int64,\n result_is_index=True,\n direction=direction, limit=limit)\n\n @Substitution(name='groupby')\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values.\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.pad\n DataFrame.pad\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('ffill', limit=limit)\n ffill = pad\n\n @Substitution(name='groupby')\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the values.\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.backfill\n DataFrame.backfill\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('bfill', limit=limit)\n bfill = backfill\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def nth(self, n, dropna=None):\n \"\"\"\n Take the nth row from each group if n is an int, or a subset of rows\n if n is a list of ints.\n\n If dropna, will take the nth non-null row, dropna is either\n Truthy (if a Series) or 'all', 'any' (if a DataFrame);\n this is equivalent to calling dropna(how=dropna) before the\n groupby.\n\n Parameters\n ----------\n n : int or list of ints\n a single nth value for the row or a list of nth values\n dropna : None or str, optional\n apply the specified dropna operation before counting which row is\n the nth row. Needs to be None, 'any' or 'all'\n\n %(see_also)s\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])\n >>> g = df.groupby('A')\n >>> g.nth(0)\n B\n A\n 1 NaN\n 2 3.0\n >>> g.nth(1)\n B\n A\n 1 2.0\n 2 5.0\n >>> g.nth(-1)\n B\n A\n 1 4.0\n 2 5.0\n >>> g.nth([0, 1])\n B\n A\n 1 NaN\n 1 2.0\n 2 3.0\n 2 5.0\n\n Specifying `dropna` allows count ignoring ``NaN``\n\n >>> g.nth(0, dropna='any')\n B\n A\n 1 2.0\n 2 3.0\n\n NaNs denote group exhausted when using dropna\n\n >>> g.nth(3, dropna='any')\n B\n A\n 1 NaN\n 2 NaN\n\n Specifying `as_index=False` in `groupby` keeps the original index.\n\n >>> df.groupby('A', as_index=False).nth(1)\n A B\n 1 1 2.0\n 4 2 5.0\n \"\"\"\n\n if isinstance(n, int):\n nth_values = [n]\n elif isinstance(n, (set, list, tuple)):\n nth_values = list(set(n))\n if dropna is not None:\n raise ValueError(\n \"dropna option with a list of nth values is not supported\")\n else:\n raise TypeError(\"n needs to be an int or a list/set/tuple of ints\")\n\n nth_values = np.array(nth_values, dtype=np.intp)\n self._set_group_selection()\n\n if not dropna:\n mask_left = np.in1d(self._cumcount_array(), nth_values)\n mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,\n -nth_values)\n mask = mask_left | mask_right\n\n out = self._selected_obj[mask]\n if not self.as_index:\n return out\n\n ids, _, _ = self.grouper.group_info\n out.index = self.grouper.result_index[ids[mask]]\n\n return out.sort_index() if self.sort else out\n\n if dropna not in ['any', 'all']:\n if isinstance(self._selected_obj, Series) and dropna is True:\n warnings.warn(\"the dropna={dropna} keyword is deprecated,\"\n \"use dropna='all' instead. 
\"\n \"For a Series groupby, dropna must be \"\n \"either None, 'any' or 'all'.\".format(\n dropna=dropna),\n FutureWarning,\n stacklevel=2)\n dropna = 'all'\n else:\n # Note: when agg-ing picker doesn't raise this,\n # just returns NaN\n raise ValueError(\"For a DataFrame groupby, dropna must be \"\n \"either None, 'any' or 'all', \"\n \"(was passed {dropna}).\".format(\n dropna=dropna))\n\n # old behaviour, but with all and any support for DataFrames.\n # modified in GH 7559 to have better perf\n max_len = n if n >= 0 else - 1 - n\n dropped = self.obj.dropna(how=dropna, axis=self.axis)\n\n # get a new grouper for our dropped obj\n if self.keys is None and self.level is None:\n\n # we don't have the grouper info available\n # (e.g. we have selected out\n # a column that is not in the current object)\n axis = self.grouper.axis\n grouper = axis[axis.isin(dropped.index)]\n\n else:\n\n # create a grouper with the original parameters, but on the dropped\n # object\n from pandas.core.groupby.grouper import _get_grouper\n grouper, _, _ = _get_grouper(dropped, key=self.keys,\n axis=self.axis, level=self.level,\n sort=self.sort,\n mutated=self.mutated)\n\n grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)\n sizes, result = grb.size(), grb.nth(n)\n mask = (sizes < max_len).values\n\n # set the results which don't meet the criteria\n if len(result) and mask.any():\n result.loc[mask] = np.nan\n\n # reset/reindex to the original groups\n if (len(self.obj) == len(dropped) or\n len(result) == len(self.grouper.result_index)):\n result.index = self.grouper.result_index\n else:\n result = result.reindex(self.grouper.result_index)\n\n return result\n\n @Substitution(name='groupby')\n def ngroup(self, ascending=True):\n \"\"\"\n Number each group from 0 to the number of groups - 1.\n\n This is the enumerative complement of cumcount. Note that the\n numbers given to the groups match the order in which the groups\n would be seen when iterating over the groupby object, not the\n order they are first observed.\n\n .. versionadded:: 0.20.2\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from number of group - 1 to 0.\n\n See Also\n --------\n .cumcount : Number the rows in each group.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({\"A\": list(\"aaabba\")})\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').ngroup()\n 0 0\n 1 0\n 2 0\n 3 1\n 4 1\n 5 0\n dtype: int64\n >>> df.groupby('A').ngroup(ascending=False)\n 0 1\n 1 1\n 2 1\n 3 0\n 4 0\n 5 1\n dtype: int64\n >>> df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()\n 0 0\n 1 0\n 2 1\n 3 3\n 4 2\n 5 0\n dtype: int64\n \"\"\"\n\n with _group_selection_context(self):\n index = self._selected_obj.index\n result = Series(self.grouper.group_info[0], index)\n if not ascending:\n result = self.ngroups - 1 - result\n return result\n\n @Substitution(name='groupby')\n def cumcount(self, ascending=True):\n \"\"\"\n Number each item in each group from 0 to the length of that group - 1.\n\n Essentially this is equivalent to\n\n >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n See Also\n --------\n .ngroup : Number the groups themselves.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],\n ... 
columns=['A'])\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').cumcount()\n 0 0\n 1 1\n 2 2\n 3 0\n 4 1\n 5 3\n dtype: int64\n >>> df.groupby('A').cumcount(ascending=False)\n 0 3\n 1 2\n 2 1\n 3 1\n 4 0\n 5 0\n dtype: int64\n \"\"\"\n\n with _group_selection_context(self):\n index = self._selected_obj.index\n cumcounts = self._cumcount_array(ascending=ascending)\n return Series(cumcounts, index)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def rank(self, method='average', ascending=True, na_option='keep',\n pct=False, axis=0):\n \"\"\"\n Provides the rank of values within each group.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n pct : boolean, default False\n Compute percentage rank of data within each group\n axis : int, default 0\n The axis of the object over which to compute the rank.\n\n Returns\n -----\n DataFrame with ranking of values within each group\n \"\"\"\n if na_option not in {'keep', 'top', 'bottom'}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n return self._cython_transform('rank', numeric_only=False,\n ties_method=method, ascending=ascending,\n na_option=na_option, pct=pct, axis=axis)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def cumprod(self, axis=0, *args, **kwargs):\n \"\"\"\n Cumulative product for each group.\n \"\"\"\n nv.validate_groupby_func('cumprod', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 0:\n return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))\n\n return self._cython_transform('cumprod', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"\n Cumulative sum for each group.\n \"\"\"\n nv.validate_groupby_func('cumsum', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 0:\n return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))\n\n return self._cython_transform('cumsum', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def cummin(self, axis=0, **kwargs):\n \"\"\"\n Cumulative min for each group.\n \"\"\"\n if axis != 0:\n return self.apply(lambda x: np.minimum.accumulate(x, axis))\n\n return self._cython_transform('cummin', numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def cummax(self, axis=0, **kwargs):\n \"\"\"\n Cumulative max for each group.\n \"\"\"\n if axis != 0:\n return self.apply(lambda x: np.maximum.accumulate(x, axis))\n\n return self._cython_transform('cummax', numeric_only=False)\n\n def _get_cythonized_result(self, how, grouper, aggregate=False,\n cython_dtype=None, needs_values=False,\n needs_mask=False, needs_ngroups=False,\n result_is_index=False,\n pre_processing=None, post_processing=None,\n **kwargs):\n \"\"\"\n Get result for Cythonized functions.\n\n Parameters\n ----------\n how : str, Cythonized function name to be called\n grouper : Grouper object containing pertinent group info\n aggregate : bool, default False\n Whether the 
result should be aggregated to match the number of\n groups\n cython_dtype : default None\n Type of the array that will be modified by the Cython call. If\n `None`, the type will be inferred from the values of each slice\n needs_values : bool, default False\n Whether the values should be a part of the Cython call\n signature\n needs_mask : bool, default False\n Whether boolean mask needs to be part of the Cython call\n signature\n needs_ngroups : bool, default False\n Whether number of groups is part of the Cython call signature\n result_is_index : bool, default False\n Whether the result of the Cython operation is an index of\n values to be retrieved, instead of the actual values themselves\n pre_processing : function, default None\n Function to be applied to `values` prior to passing to Cython\n Raises if `needs_values` is False\n post_processing : function, default None\n Function to be applied to result of Cython function\n **kwargs : dict\n Extra arguments to be passed back to Cython funcs\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n \"\"\"\n if result_is_index and aggregate:\n raise ValueError(\"'result_is_index' and 'aggregate' cannot both \"\n \"be True!\")\n if post_processing:\n if not callable(pre_processing):\n raise ValueError(\"'post_processing' must be a callable!\")\n if pre_processing:\n if not callable(pre_processing):\n raise ValueError(\"'pre_processing' must be a callable!\")\n if not needs_values:\n raise ValueError(\"Cannot use 'pre_processing' without \"\n \"specifying 'needs_values'!\")\n\n labels, _, ngroups = grouper.group_info\n output = collections.OrderedDict()\n base_func = getattr(libgroupby, how)\n\n for name, obj in self._iterate_slices():\n if aggregate:\n result_sz = ngroups\n else:\n result_sz = len(obj.values)\n\n if not cython_dtype:\n cython_dtype = obj.values.dtype\n\n result = np.zeros(result_sz, dtype=cython_dtype)\n func = partial(base_func, result, labels)\n if needs_values:\n vals = obj.values\n if pre_processing:\n vals = pre_processing(vals)\n func = partial(func, vals)\n\n if needs_mask:\n mask = isna(obj.values).view(np.uint8)\n func = partial(func, mask)\n\n if needs_ngroups:\n func = partial(func, ngroups)\n\n func(**kwargs) # Call func to modify indexer values in place\n\n if result_is_index:\n result = algorithms.take_nd(obj.values, result)\n\n if post_processing:\n result = post_processing(result)\n\n output[name] = result\n\n if aggregate:\n return self._wrap_aggregated_output(output)\n else:\n return self._wrap_transformed_output(output)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def shift(self, periods=1, freq=None, axis=0):\n \"\"\"\n Shift each group by periods observations.\n\n Parameters\n ----------\n periods : integer, default 1\n number of periods to shift\n freq : frequency string\n axis : axis to shift, default 0\n \"\"\"\n\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.shift(periods, freq, axis))\n\n return self._get_cythonized_result('group_shift_indexer',\n self.grouper, cython_dtype=np.int64,\n needs_ngroups=True,\n result_is_index=True,\n periods=periods)\n\n @Substitution(name='groupby')\n @Appender(_common_see_also)\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n axis=0):\n \"\"\"\n Calculate pct_change of each value to previous entry in group.\n \"\"\"\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.pct_change(periods=periods,\n fill_method=fill_method,\n limit=limit, freq=freq,\n axis=axis))\n 
filled = getattr(self, fill_method)(limit=limit)\n filled = filled.drop(self.grouper.names, axis=1)\n fill_grp = filled.groupby(self.grouper.labels)\n shifted = fill_grp.shift(periods=periods, freq=freq)\n return (filled / shifted) - 1\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def head(self, n=5):\n \"\"\"\n Returns first n rows of each group.\n\n Essentially equivalent to ``.apply(lambda x: x.head(n))``,\n except ignores as_index flag.\n\n %(see_also)s\n\n Examples\n --------\n\n >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],\n columns=['A', 'B'])\n >>> df.groupby('A', as_index=False).head(1)\n A B\n 0 1 2\n 2 5 6\n >>> df.groupby('A').head(1)\n A B\n 0 1 2\n 2 5 6\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array() < n\n return self._selected_obj[mask]\n\n @Substitution(name='groupby', see_also=_common_see_also)\n def tail(self, n=5):\n \"\"\"\n Returns last n rows of each group.\n\n Essentially equivalent to ``.apply(lambda x: x.tail(n))``,\n except ignores as_index flag.\n\n %(see_also)s\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],\n columns=['A', 'B'])\n >>> df.groupby('A').tail(1)\n A B\n 1 a 2\n 3 b 2\n >>> df.groupby('A').head(1)\n A B\n 0 a 1\n 2 b 1\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array(ascending=False) < n\n return self._selected_obj[mask]\n\n\nGroupBy._add_numeric_operations()\n\n\n@Appender(GroupBy.__doc__)\ndef groupby(obj, by, **kwds):\n if isinstance(obj, Series):\n from pandas.core.groupby.generic import SeriesGroupBy\n klass = SeriesGroupBy\n elif isinstance(obj, DataFrame):\n from pandas.core.groupby.generic import DataFrameGroupBy\n klass = DataFrameGroupBy\n else: # pragma: no cover\n raise TypeError('invalid type: {}'.format(obj))\n\n return klass(obj, by, **kwds)\n",
"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,\n IntervalIndex, NaT, Series, Timestamp, date_range, period_range,\n timedelta_range)\nimport pandas.util.testing as tm\n\n\nclass TestCategoricalConstructors(object):\n\n def test_validate_ordered(self):\n # see gh-14058\n exp_msg = \"'ordered' must either be 'True' or 'False'\"\n exp_err = TypeError\n\n # This should be a boolean.\n ordered = np.array([0, 1, 2])\n\n with pytest.raises(exp_err, match=exp_msg):\n Categorical([1, 2, 3], ordered=ordered)\n\n with pytest.raises(exp_err, match=exp_msg):\n Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],\n ordered=ordered)\n\n def test_constructor_empty(self):\n # GH 17248\n c = Categorical([])\n expected = Index([])\n tm.assert_index_equal(c.categories, expected)\n\n c = Categorical([], categories=[1, 2, 3])\n expected = pd.Int64Index([1, 2, 3])\n tm.assert_index_equal(c.categories, expected)\n\n def test_constructor_empty_boolean(self):\n # see gh-22702\n cat = pd.Categorical([], categories=[True, False])\n categories = sorted(cat.categories.tolist())\n assert categories == [False, True]\n\n def test_constructor_tuples(self):\n values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)\n result = Categorical(values)\n expected = Index([(1,), (1, 2)], tupleize_cols=False)\n tm.assert_index_equal(result.categories, expected)\n assert result.ordered is False\n\n def test_constructor_tuples_datetimes(self):\n # numpy will auto reshape when all of the tuples are the\n # same len, so add an extra one with 2 items and slice it off\n values = np.array([(Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),),\n (Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),),\n ('a', 'b')], dtype=object)[:-1]\n result = Categorical(values)\n expected = Index([(Timestamp('2010-01-01'),),\n (Timestamp('2010-01-02'),)], tupleize_cols=False)\n tm.assert_index_equal(result.categories, expected)\n\n def test_constructor_unsortable(self):\n\n # it works!\n arr = np.array([1, 2, 3, datetime.now()], dtype='O')\n factor = Categorical(arr, ordered=False)\n assert not factor.ordered\n\n # this however will raise as cannot be sorted\n with pytest.raises(TypeError):\n Categorical(arr, ordered=True)\n\n def test_constructor_interval(self):\n result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],\n ordered=True)\n ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])\n exp = Categorical(ii, ordered=True)\n tm.assert_categorical_equal(result, exp)\n tm.assert_index_equal(result.categories, ii)\n\n def test_constructor(self):\n\n exp_arr = np.array([\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"], dtype=np.object_)\n c1 = Categorical(exp_arr)\n tm.assert_numpy_array_equal(c1.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"a\", \"b\", \"c\"])\n tm.assert_numpy_array_equal(c2.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"c\", \"b\", \"a\"])\n tm.assert_numpy_array_equal(c2.__array__(), exp_arr)\n\n # categories must be unique\n with pytest.raises(ValueError):\n Categorical([1, 2], [1, 2, 2])\n\n with pytest.raises(ValueError):\n Categorical([\"a\", \"b\"], [\"a\", \"b\", \"b\"])\n\n # The default should be unordered\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n assert 
not c1.ordered\n\n # Categorical as input\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n c2 = Categorical(c1)\n tm.assert_categorical_equal(c1, c2)\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n c2 = Categorical(c1)\n tm.assert_categorical_equal(c1, c2)\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n c2 = Categorical(c1)\n tm.assert_categorical_equal(c1, c2)\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n c2 = Categorical(c1, categories=[\"a\", \"b\", \"c\"])\n tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())\n tm.assert_index_equal(c2.categories, Index([\"a\", \"b\", \"c\"]))\n\n # Series of dtype category\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n c2 = Categorical(Series(c1))\n tm.assert_categorical_equal(c1, c2)\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"c\", \"b\"])\n c2 = Categorical(Series(c1))\n tm.assert_categorical_equal(c1, c2)\n\n # Series\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]))\n tm.assert_categorical_equal(c1, c2)\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]),\n categories=[\"a\", \"b\", \"c\", \"d\"])\n tm.assert_categorical_equal(c1, c2)\n\n # This should result in integer categories, not float!\n cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])\n assert is_integer_dtype(cat.categories)\n\n # https://github.com/pandas-dev/pandas/issues/3678\n cat = Categorical([np.nan, 1, 2, 3])\n assert is_integer_dtype(cat.categories)\n\n # this should result in floats\n cat = Categorical([np.nan, 1, 2., 3])\n assert is_float_dtype(cat.categories)\n\n cat = Categorical([np.nan, 1., 2., 3.])\n assert is_float_dtype(cat.categories)\n\n # This doesn't work -> this would probably need some kind of \"remember\n # the original type\" feature to try to cast the array interface result\n # to...\n\n # vals = np.asarray(cat[cat.notna()])\n # assert is_integer_dtype(vals)\n\n # corner cases\n cat = Categorical([1])\n assert len(cat.categories) == 1\n assert cat.categories[0] == 1\n assert len(cat.codes) == 1\n assert cat.codes[0] == 0\n\n cat = Categorical([\"a\"])\n assert len(cat.categories) == 1\n assert cat.categories[0] == \"a\"\n assert len(cat.codes) == 1\n assert cat.codes[0] == 0\n\n # Scalars should be converted to lists\n cat = Categorical(1)\n assert len(cat.categories) == 1\n assert cat.categories[0] == 1\n assert len(cat.codes) == 1\n assert cat.codes[0] == 0\n\n # two arrays\n # - when the first is an integer dtype and the second is not\n # - when the resulting codes are all -1/NaN\n with tm.assert_produces_warning(None):\n c_old = Categorical([0, 1, 2, 0, 1, 2],\n categories=[\"a\", \"b\", \"c\"]) # noqa\n\n with tm.assert_produces_warning(None):\n c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa\n categories=[3, 4, 5])\n\n # the next one are from the old docs\n with tm.assert_produces_warning(None):\n c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa\n cat = Categorical([1, 2], categories=[1, 2, 3])\n\n # this is a legitimate constructor\n with tm.assert_produces_warning(None):\n c = Categorical(np.array([], dtype='int64'), # noqa\n categories=[3, 2, 1], ordered=True)\n\n def test_constructor_not_sequence(self):\n # https://github.com/pandas-dev/pandas/issues/16022\n with 
pytest.raises(TypeError):\n Categorical(['a', 'b'], categories='a')\n\n def test_constructor_with_null(self):\n\n # Cannot have NaN in categories\n with pytest.raises(ValueError):\n Categorical([np.nan, \"a\", \"b\", \"c\"],\n categories=[np.nan, \"a\", \"b\", \"c\"])\n\n with pytest.raises(ValueError):\n Categorical([None, \"a\", \"b\", \"c\"],\n categories=[None, \"a\", \"b\", \"c\"])\n\n with pytest.raises(ValueError):\n Categorical(DatetimeIndex(['nat', '20160101']),\n categories=[NaT, Timestamp('20160101')])\n\n def test_constructor_with_index(self):\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n tm.assert_categorical_equal(ci.values, Categorical(ci))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n tm.assert_categorical_equal(ci.values,\n Categorical(ci.astype(object),\n categories=ci.categories))\n\n def test_constructor_with_generator(self):\n # This was raising an Error in isna(single_val).any() because isna\n # returned a scalar for a generator\n xrange = range\n\n exp = Categorical([0, 1, 2])\n cat = Categorical((x for x in [0, 1, 2]))\n tm.assert_categorical_equal(cat, exp)\n cat = Categorical(xrange(3))\n tm.assert_categorical_equal(cat, exp)\n\n # This uses xrange internally\n from pandas.core.index import MultiIndex\n MultiIndex.from_product([range(5), ['a', 'b', 'c']])\n\n # check that categories accept generators and sequences\n cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))\n tm.assert_categorical_equal(cat, exp)\n cat = Categorical([0, 1, 2], categories=xrange(3))\n tm.assert_categorical_equal(cat, exp)\n\n @pytest.mark.parametrize(\"dtl\", [\n date_range(\"1995-01-01 00:00:00\", periods=5, freq=\"s\"),\n date_range(\"1995-01-01 00:00:00\", periods=5,\n freq=\"s\", tz=\"US/Eastern\"),\n timedelta_range(\"1 day\", periods=5, freq=\"s\")\n ])\n def test_constructor_with_datetimelike(self, dtl):\n # see gh-12077\n # constructor with a datetimelike and NaT\n\n s = Series(dtl)\n c = Categorical(s)\n\n expected = type(dtl)(s)\n expected.freq = None\n\n tm.assert_index_equal(c.categories, expected)\n tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype=\"int8\"))\n\n # with NaT\n s2 = s.copy()\n s2.iloc[-1] = NaT\n c = Categorical(s2)\n\n expected = type(dtl)(s2.dropna())\n expected.freq = None\n\n tm.assert_index_equal(c.categories, expected)\n\n exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)\n tm.assert_numpy_array_equal(c.codes, exp)\n\n result = repr(c)\n assert \"NaT\" in result\n\n def test_constructor_from_index_series_datetimetz(self):\n idx = date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_from_index_series_timedelta(self):\n idx = timedelta_range('1 days', freq='D', periods=3)\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_from_index_series_period(self):\n idx = period_range('2015-01-01', freq='D', periods=3)\n result = Categorical(idx)\n tm.assert_index_equal(result.categories, idx)\n\n result = Categorical(Series(idx))\n tm.assert_index_equal(result.categories, idx)\n\n def test_constructor_invariant(self):\n # GH 14190\n vals = [\n np.array([1., 1.2, 1.8, np.nan]),\n np.array([1, 2, 3], dtype='int64'),\n ['a', 'b', 'c', np.nan],\n [pd.Period('2014-01'), 
pd.Period('2014-02'), NaT],\n [Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],\n [Timestamp('2014-01-01', tz='US/Eastern'),\n Timestamp('2014-01-02', tz='US/Eastern'), NaT],\n ]\n for val in vals:\n c = Categorical(val)\n c2 = Categorical(c)\n tm.assert_categorical_equal(c, c2)\n\n @pytest.mark.parametrize('ordered', [True, False])\n def test_constructor_with_dtype(self, ordered):\n categories = ['b', 'a', 'c']\n dtype = CategoricalDtype(categories, ordered=ordered)\n result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)\n expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,\n ordered=ordered)\n tm.assert_categorical_equal(result, expected)\n assert result.ordered is ordered\n\n def test_constructor_dtype_and_others_raises(self):\n dtype = CategoricalDtype(['a', 'b'], ordered=True)\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)\n\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], ordered=True, dtype=dtype)\n\n with pytest.raises(ValueError, match=\"Cannot\"):\n Categorical(['a', 'b'], ordered=False, dtype=dtype)\n\n @pytest.mark.parametrize('categories', [\n None, ['a', 'b'], ['a', 'c'],\n ])\n @pytest.mark.parametrize('ordered', [True, False])\n def test_constructor_str_category(self, categories, ordered):\n result = Categorical(['a', 'b'], categories=categories,\n ordered=ordered, dtype='category')\n expected = Categorical(['a', 'b'], categories=categories,\n ordered=ordered)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_str_unknown(self):\n with pytest.raises(ValueError, match=\"Unknown `dtype`\"):\n Categorical([1, 2], dtype=\"foo\")\n\n def test_constructor_from_categorical_with_dtype(self):\n dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)\n values = Categorical(['a', 'b', 'd'])\n result = Categorical(values, dtype=dtype)\n # We use dtype.categories, not values.categories\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_from_categorical_with_unknown_dtype(self):\n dtype = CategoricalDtype(None, ordered=True)\n values = Categorical(['a', 'b', 'd'])\n result = Categorical(values, dtype=dtype)\n # We use values.categories, not dtype.categories\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_from_categorical_string(self):\n values = Categorical(['a', 'b', 'd'])\n # use categories, ordered\n result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,\n dtype='category')\n expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n # No string\n result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_constructor_with_categorical_categories(self):\n # GH17884\n expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])\n\n result = Categorical(\n ['a', 'b'], categories=Categorical(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n result = Categorical(\n ['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n def test_from_codes(self):\n\n # too few categories\n with pytest.raises(ValueError):\n Categorical.from_codes([1, 2], [1, 2])\n\n # no int codes\n with pytest.raises(ValueError):\n 
Categorical.from_codes([\"a\"], [1, 2])\n\n # no unique categories\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1, 2], [\"a\", \"a\", \"b\"])\n\n # NaN categories included\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1, 2], [\"a\", \"b\", np.nan])\n\n # too negative\n with pytest.raises(ValueError):\n Categorical.from_codes([-2, 1, 2], [\"a\", \"b\", \"c\"])\n\n exp = Categorical([\"a\", \"b\", \"c\"], ordered=False)\n res = Categorical.from_codes([0, 1, 2], [\"a\", \"b\", \"c\"])\n tm.assert_categorical_equal(exp, res)\n\n # Not available in earlier numpy versions\n if hasattr(np.random, \"choice\"):\n codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])\n Categorical.from_codes(codes, categories=[\"train\", \"test\"])\n\n def test_from_codes_with_categorical_categories(self):\n # GH17884\n expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])\n\n result = Categorical.from_codes(\n [0, 1], categories=Categorical(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n result = Categorical.from_codes(\n [0, 1], categories=CategoricalIndex(['a', 'b', 'c']))\n tm.assert_categorical_equal(result, expected)\n\n # non-unique Categorical still raises\n with pytest.raises(ValueError):\n Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))\n\n def test_from_codes_with_nan_code(self):\n # GH21767\n codes = [1, 2, np.nan]\n categories = ['a', 'b', 'c']\n with pytest.raises(ValueError):\n Categorical.from_codes(codes, categories)\n\n def test_from_codes_with_float(self):\n # GH21767\n codes = [1.0, 2.0, 0] # integer, but in float dtype\n categories = ['a', 'b', 'c']\n\n with tm.assert_produces_warning(FutureWarning):\n cat = Categorical.from_codes(codes, categories)\n tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype='i1'))\n\n codes = [1.1, 2.0, 0] # non-integer\n with pytest.raises(ValueError):\n Categorical.from_codes(codes, categories)\n\n @pytest.mark.parametrize('dtype', [None, 'category'])\n def test_from_inferred_categories(self, dtype):\n cats = ['a', 'b']\n codes = np.array([0, 0, 1, 1], dtype='i8')\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical.from_codes(codes, cats)\n tm.assert_categorical_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [None, 'category'])\n def test_from_inferred_categories_sorts(self, dtype):\n cats = ['b', 'a']\n codes = np.array([0, 1, 1, 1], dtype='i8')\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])\n tm.assert_categorical_equal(result, expected)\n\n def test_from_inferred_categories_dtype(self):\n cats = ['a', 'b', 'd']\n codes = np.array([0, 1, 0, 2], dtype='i8')\n dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical(['a', 'b', 'a', 'd'],\n categories=['c', 'b', 'a'],\n ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_from_inferred_categories_coerces(self):\n cats = ['1', '2', 'bad']\n codes = np.array([0, 0, 1, 2], dtype='i8')\n dtype = CategoricalDtype([1, 2])\n result = Categorical._from_inferred_categories(cats, codes, dtype)\n expected = Categorical([1, 1, 2, np.nan])\n tm.assert_categorical_equal(result, expected)\n\n def test_construction_with_ordered(self):\n # GH 9347, 9190\n cat = Categorical([0, 1, 2])\n assert not cat.ordered\n cat = Categorical([0, 1, 2], ordered=False)\n assert not cat.ordered\n cat = 
Categorical([0, 1, 2], ordered=True)\n assert cat.ordered\n\n @pytest.mark.xfail(reason=\"Imaginary values not supported in Categorical\")\n def test_constructor_imaginary(self):\n values = [1, 2, 3 + 1j]\n c1 = Categorical(values)\n tm.assert_index_equal(c1.categories, Index(values))\n tm.assert_numpy_array_equal(np.array(c1), np.array(values))\n"
] |
[
[
"pandas.core.window.ExpandingGroupby",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.asarray",
"numpy.minimum.accumulate",
"pandas.core.groupby.grouper._get_grouper",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.dtypes.missing.notna",
"numpy.concatenate",
"pandas.compat.iteritems",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.window.RollingGroupby",
"pandas.core.series.Series",
"pandas.errors.AbstractMethodError",
"numpy.arange",
"pandas.compat.callable",
"pandas.core.common._not_none",
"pandas.core.config.option_context",
"pandas.util._validators.validate_kwargs",
"pandas.core.common._pipe",
"numpy.repeat",
"pandas.compat.set_function_name",
"numpy.zeros",
"pandas.core.algorithms.take_nd",
"pandas.util._decorators.Appender",
"pandas.core.base.DataError",
"numpy.nonzero",
"pandas._libs.Timestamp",
"pandas.core.resample.get_resampler_for_grouping",
"pandas.core.reshape.concat.concat",
"numpy.errstate",
"numpy.array",
"pandas.core.dtypes.common.ensure_float",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.compat.zip",
"pandas.core.dtypes.missing.isna",
"numpy.maximum.accumulate",
"pandas.compat.numpy.function.validate_groupby_func",
"numpy.empty",
"pandas.core.index.Index"
],
[
"pandas.Series",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.Int64Index",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.Categorical.from_codes",
"pandas.core.dtypes.common.is_integer_dtype",
"numpy.random.choice",
"pandas.Categorical",
"pandas.Interval",
"pandas.date_range",
"pandas.core.dtypes.dtypes.CategoricalDtype",
"numpy.array",
"pandas.timedelta_range",
"pandas.CategoricalIndex",
"pandas.period_range",
"pandas.Categorical._from_inferred_categories",
"pandas.Period",
"pandas.Timestamp"
]
] |
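The two API lists above enumerate the fully-qualified calls extracted from the embedded pandas groupby and Categorical-constructor sources. As an illustration only — not part of the dataset record, and assuming a reasonably recent pandas/numpy — a minimal runnable sketch exercising a couple of the listed names:

# Illustrative sketch (not drawn from the record above).
import numpy as np
import pandas as pd

# pandas.Categorical.from_codes: rebuild a categorical from integer codes
cat = pd.Categorical.from_codes([0, 1, 0, 2], categories=["a", "b", "c"])
print(cat.codes.tolist())                     # [0, 1, 0, 2]

# DataFrame.groupby(...).cumcount(): one of the GroupBy methods defined in the
# groupby source embedded earlier in this record
df = pd.DataFrame({"key": list("aabba"), "val": np.arange(5)})
print(df.groupby("key").cumcount().tolist())  # [0, 1, 0, 1, 2]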
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
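
Illustrative sketch (not part of the record above): the tests in this entry exercise pandas.Categorical.from_codes; a minimal standalone call, assuming only that pandas and numpy are installed, looks like this.

    import numpy as np
    import pandas as pd

    # Integer codes index into the categories list; -1 would mean "missing".
    codes = np.array([0, 1, 2, 0], dtype="i8")
    cat = pd.Categorical.from_codes(codes, categories=["a", "b", "c"], ordered=False)
    print(cat)              # ['a', 'b', 'c', 'a'] with categories ['a', 'b', 'c']
    print(cat.codes.dtype)  # compact signed integer dtype (int8 here)
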
vctrop/ant_colony_for_continuous_domains
|
[
"a109abfca35be4d0453c7e01f6f755c11ae09473"
] |
[
"acor_plots.py"
] |
[
"#!python3\n\n# Copyright (C) 2020 Victor O. Costa\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n \n# Python standar lib\nimport math\n# 3rth party\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Own\nfrom base_metaheuristic import Base\n\nclass ACOr(Base):\n \"\"\" Class for the Ant Colony Optimization for Continuous Domains, following (Socha and Dorigo, 2006) \"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n # Define verbosity and NULL problem definition\n super().__init__()\n \n # Initial algorithm parameters\n self.relative_iterations = None # Array containing the iterations at which best solutions are reported\n self.num_iter = 0 # Number of iterations\n self.pop_size = 5 # Population size\n self.k = 50 # Archive size\n self.q = 0.01 # Locality of search (selection of pivot ants)\n self.xi = 0.85 # Speed of convergence (spreadness of ant generation)\n \n # Optimization results\n self.SA = None # Solution Archive\n self.best_solution = None # Best solution of the archive\n \n\n def set_parameters(self, pop_size, k, q, xi, function_evaluations_array):\n \"\"\" Define values for the parameters used by the algorithm \"\"\"\n # Input error checking\n if len(function_evaluations_array) == 0:\n print(\"Error, objective function evaluation array must not be empty\")\n exit(-1)\n if pop_size <= 0 or k <= 0 or q <= 0 or xi <= 0:\n print(\"Error, parameters must be non-null positives\")\n exit(-1)\n \n \n # Number of function evaluations for ACOr: pop_size * num_iterations\n function_evaluations_array = np.array(function_evaluations_array)\n self.relative_iterations = (function_evaluations_array - k) / pop_size\n all_divisible = (np.array([x.is_integer() for x in self.relative_iterations])).all()\n if not all_divisible:\n print(\"Error, at least one number of function evaluations subtracted by k is not divisible by population size m\")\n exit(-1)\n \n self.num_iter = int(np.max(self.relative_iterations))\n self.pop_size = pop_size\n self.k = k\n self.q = q\n self.xi = xi\n\n \n def define_variables(self, initial_ranges, is_bounded):\n \"\"\" Defines the number of variables, their initial values ranges and wether or not these ranges constrain the variable during the search \"\"\"\n # Input error checking\n if self.num_iter == 0:\n print(\"Error, please set algorithm parameters before variables definition\")\n exit(-1)\n if len(initial_ranges) == 0 or len(is_bounded) == 0:\n print(\"Error, initial_ranges and is_bounded lists must not be empty\")\n exit(-1)\n if len(initial_ranges) != len(is_bounded):\n print(\"Error, the number of variables for initial_ranges and is_bounded must be equal\")\n exit(-1)\n \n self.num_variables = len(initial_ranges)\n self.initial_ranges = initial_ranges\n self.is_bounded = is_bounded\n self.SA = np.zeros((self.k, self.num_variables + 1))\n\n \n def _biased_selection(self, probabilities):\n \"\"\" Returns an index based on a set of probabilities (also known as 
roulette wheel selection in GA) \"\"\"\n r = np.random.uniform(0, sum(probabilities))\n for i, f in enumerate(probabilities):\n r -= f\n if r <= 0:\n return i\n \n\n def update_success_rate(self, success_count):\n \"\"\" Success rate is not updated in vanilla ACOr \"\"\"\n pass\n \n def control_xi(self):\n \"\"\" Xi is not updated in vanilla ACOr \"\"\"\n pass\n \n def control_q(self):\n \"\"\" q is not updated in vanilla ACOr \"\"\"\n pass\n \n def gaussian_pdf_weights(self, x):\n gaus_std = self.q * self.k\n gaus_avg = 1\n w = (1 / (gaus_std * math.sqrt(2 * math.pi))) * np.exp( (-1/2) * ( ( (x - gaus_avg) / gaus_std ) ** 2) )\n \n return w\n \n def handle_adaptions(self, success_count):\n self.update_success_rate(success_count)\n self.control_q()\n self.control_xi()\n \n def optimize(self):\n \"\"\" Initializes the archive and enter the main loop, until it reaches maximum number of iterations \"\"\"\n # Error checking\n if self.num_variables == None:\n print(\"Error, number of variables and their boundaries must be defined prior to optimization\")\n exit(-1)\n if self.cost_function == None:\n print(\"Error, cost function must be defined prior to optimization\")\n exit(-1)\n \n # Keep solutions defined by function_evaluations_array\n recorded_solutions = []\n q_list =[]\n xi_list = []\n # Initialize the archive by random sampling, respecting each variable's boundaries \n if self.verbosity: print(\"[INITIALIZING SOLUTION ARCHIVE]\")\n pop = np.zeros((self.pop_size, self.num_variables +1))\n w = np.zeros(self.k)\n \n for i in range(self.k):\n for j in range(self.num_variables): \n self.SA[i, j] = np.random.uniform(self.initial_ranges[j][0], self.initial_ranges[j][1]) # Initialize solution archive randomly\n self.SA[i, -1] = self.cost_function(self.SA[i, 0:self.num_variables]) # Get initial cost for each solution\n self.SA = self.SA[self.SA[:, -1].argsort()] # Sort solution archive (best solutions first)\n \n # Array containing indices of solution archive position\n x = np.linspace(1,self.k,self.k) \n w = self.gaussian_pdf_weights(x) # Weights as a gaussian function of rank with mean 1, std qk\n p = w/sum(w) \n \n if self.verbosity: print(\"ALGORITHM MAIN LOOP\")\n # Algorithm runs until it reaches the determined number of iterations\n for iteration in range(self.num_iter):\n if self.verbosity:\n print(\"[%d]\" % iteration)\n print(self.SA[0, :])\n \n success_count = 0 # Count how many ant improve the solution they are sampling from \n Mi = self.SA[:, 0:self.num_variables] # Matrix of means\n for ant in range(self.pop_size): # For each ant in the population\n l = self._biased_selection(p) # Select solution of the SA to sample from based on probabilities p\n # Compute average distances from the chosen solution to other solutions\n # Used as standard deviation of solution generation\n sigmas_array = self.xi * np.sum(np.abs(self.SA[:,:-1] - self.SA[l, :-1]), axis = 0) / (self.k - 1)\n \n for var in range(self.num_variables):\n sigma = sigmas_array[var]\n pop[ant, var] = np.random.normal(Mi[l, var], sigma) # Sample from normal distribution with mean Mi and st. dev. 
sigma\n \n # Search space boundaries violation is only dealt with when the variable is considered bounded (self.is_bounded)\n if self.is_bounded[var]:\n # Use the hard border strategy\n if pop[ant, var] < self.initial_ranges[var][0]:\n pop[ant, var] = self.initial_ranges[var][0]\n elif pop[ant, var] > self.initial_ranges[var][1]:\n pop[ant, var] = self.initial_ranges[var][1] \n \n # Use the random position strategy\n # if pop[ant, var] < self.initial_ranges[var][0] or pop[ant, var] > self.initial_ranges[var][1]: \n # pop[ant, var] = np.random.uniform(self.initial_ranges[var][0], self.initial_ranges[var][1])\n \n # Evaluate cost of new solution\n pop[ant, -1] = self.cost_function(pop[ant, 0:self.num_variables]) \n \n # Check if the new solution is better than the one the ant sampled from\n if pop[ant, -1] < self.SA[l, -1]:\n success_count += 1\n \n # Compute success rate, updates xi and q (No effect in vanilla ACOr)\n self.handle_adaptions(success_count)\n q_list.append(self.q)\n xi_list.append(self.xi)\n # Append new solutions to the Archive\n self.SA = np.append(self.SA, pop, axis = 0) \n # Update PDF from which ants sample their centers, according to updates in q parameter\n w = self.gaussian_pdf_weights(x) # Weights as a gaussian function of rank with mean 1, std qk\n p = w/sum(w) # Probabilities of selecting solutions as search guides\n \n # Sort solution archive according to the fitness of each solution\n self.SA = self.SA[self.SA[:, -1].argsort()] \n # Remove worst solutions\n self.SA = self.SA[0:self.k, :] \n # Extract current best solution\n self.best_solution = np.array(self.SA[0, :])\n if (self.relative_iterations - 1 == iteration).any():\n recorded_solutions.append(np.array(self.best_solution))\n \n \n return q_list, xi_list, np.array(recorded_solutions)\n \n## The following classes show that the idea of exploration/exploitation adaption based in the success rate of the swarm in AIWPS (Nickabadi et al., 2011) can be applied to ACOr, and possibly many other swarm-based metaheuristics.\n\n# Success rate adaptive ACOr \nclass SRAACOr(ACOr):\n \"\"\" Parent class of all adaptive versions of ACOr.\"\"\"\n \n def __init__(self):\n \"\"\" Constructor \"\"\"\n super().__init__()\n self.success_rate = None\n self.min = {'q' : None,\n 'xi': None}\n self.max = {'q' : None,\n 'xi': None}\n self.map_type = {'q' : None,\n 'xi': None}\n \n self.lin_a = {'q' : None,\n 'xi': None}\n self.lin_b = {'q' : None,\n 'xi': None}\n \n self.sig_K = 2\n self.sig_Q = {'q' : None,\n 'xi': None}\n self.sig_B = {'q' : None,\n 'xi': None}\n \n self.exp_A = {'q' : None,\n 'xi': None}\n self.exp_B = {'q' : None,\n 'xi': None}\n \n \n def update_success_rate(self, success_count):\n \"\"\" Returns the success rate of the swarm at a given iteration,\n considering how many ants generated better solutions than the solutions they sampled from \"\"\"\n self.success_rate = success_count / self.pop_size\n \n \n def parameterize_map(self, parameter):\n if not isinstance(parameter, str) or (parameter != 'q' and parameter != 'xi'):\n print('Parameter must be a string equal to \\'q\\' or \\'xi\\'')\n exit(-1)\n \n if self.map_type[parameter] == 'lin':\n self.lin_a[parameter] = self.max[parameter] - self.min[parameter]\n self.lin_b[parameter] = self.min[parameter]\n elif self.map_type[parameter] == 'sig':\n self.sig_Q[parameter] = (self.sig_K - self.min[parameter]) / self.min[parameter]\n self.sig_B[parameter] = math.log( (self.max[parameter] / (self.sig_K - self.max[parameter])) * self.sig_Q[parameter])\n else:\n 
self.exp_A[parameter] = self.min[parameter]\n self.exp_B[parameter] = math.log( self.max[parameter] / self.min[parameter] )\n \n \n def evaluate_map(self, parameter, x):\n if not isinstance(parameter, str) or (parameter != 'q' and parameter != 'xi'):\n print('Parameter must be a string equal to \\'q\\' or \\'xi\\'')\n exit(-1)\n \n if self.map_type[parameter] == None:\n print('Please first define the map type of ' + parameter)\n exit(-1)\n \n # Linear map\n if self.map_type[parameter] == 'lin':\n if self.lin_a[parameter] == None or self.lin_b[parameter] == None:\n print('Error, first parameterize the line')\n exit(-1)\n y = self.lin_a[parameter] * x + self.lin_b[parameter]\n # Sigmoidal map\n elif self.map_type[parameter] == 'sig':\n if self.sig_Q[parameter] == None or self.sig_B[parameter] == None:\n print('Error, first parameterize the sigmoid')\n exit(-1)\n y = self.sig_K / (1 + self.sig_Q[parameter] * math.exp(- self.sig_B[parameter] * x))\n # Exponential map\n else:\n if self.exp_A[parameter] == None or self.exp_B[parameter] == None:\n print('Error, first parameterize the exponential')\n exit(-1)\n y = self.exp_A[parameter] * math.exp( self.exp_B[parameter] * x )\n return y\n \n \n# Adaptive elitism level ACOr\nclass AELACOr(SRAACOr):\n \"\"\" Adaptive control of the q parameter \"\"\"\n def __init__(self):\n \"\"\" Constructor \"\"\"\n super().__init__()\n \n def set_parameters(self, pop_size, k, xi, min_q, max_q, map_type, function_evaluations_array):\n \"\"\" Define values for the parameters used by the algorithm \"\"\"\n # Input error checking\n if min_q > max_q:\n print('Error, maximum q must be greater than minimum q')\n exit(-1)\n if min_q <= 0:\n print('Error, minimum q must be greater than zero')\n exit(-1)\n if not isinstance(map_type, str):\n print('Error, map from success rate to q must be a string')\n exit(-1)\n if map_type != 'lin' and map_type != 'sig' and map_type != 'exp':\n print('Error, map type must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n exit(-1)\n if map_type == 'sig' and max_q >= self.sig_K:\n print('Error, maximum q must be lesser than sigmoid K = ' + str(self.sig_K))\n \n # Parameter setting from ACOr class\n super().set_parameters(pop_size, k, max_q, xi, function_evaluations_array) \n\n # Parameterize control curve\n self.min['q'] = min_q\n self.max['q'] = max_q\n self.map_type['q'] = map_type\n self.parameterize_map('q')\n \n \n def control_q(self):\n \"\"\" Use population success rate to update q \"\"\"\n if self.success_rate == None:\n print(\"Error, compute success rate before updating q\")\n exit(-1)\n \n # Compute new q, directly proportional (linearity or not) to the success rate\n self.q = self.evaluate_map('q', self.success_rate)\n \n \n# Adaptive generation dispersion ACOr\nclass AGDACOr(SRAACOr):\n \"\"\" Adaptive control of the xi parameter \"\"\"\n \n def __init__(self):\n \"\"\" Constructor \"\"\"\n super().__init__()\n \n def set_parameters(self, pop_size, k, q, min_xi, max_xi, map_type, function_evaluations_array):\n \"\"\" Define values for the parameters used by the algorithm \"\"\"\n # Input error checking\n if min_xi > max_xi:\n print('Error, maximum xi must be greater than minimum xi')\n exit(-1)\n if min_xi <= 0:\n print('Error, minimum xi must be greater than zero')\n exit(-1)\n if not isinstance(map_type, str):\n print('Error, map from success rate to xi must be a string')\n exit(-1)\n if map_type != 'lin' and map_type != 'sig' and map_type != 'exp':\n print('Error, map type must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n exit(-1)\n if 
map_type == 'sig' and max_xi >= self.sig_K:\n print('Error, maximum xi must be lesser than sigmoid K = ' + str(self.sig_K))\n \n # Parameter setting from ACOr class\n super().set_parameters(pop_size, k, q, max_xi, function_evaluations_array) \n\n # Minimum and maximum of adaptive xi\n # Parameterize control curve\n self.min['xi'] = min_xi\n self.max['xi'] = max_xi\n self.map_type['xi'] = map_type\n self.parameterize_map('xi')\n \n def control_xi(self):\n \"\"\" Use population success rate to update Xi \"\"\"\n if self.success_rate == None:\n print(\"Error, compute success rate before updating xi\")\n exit(-1)\n \n # Compute new xi, inversely proportional (linearity or not) to the success rate\n self.xi = self.evaluate_map('xi', (1 - self.success_rate))\n\n \n# Bi-adaptive ACOr\nclass BAACOr(SRAACOr):\n \"\"\" Adaptive control of the both q and xi parameters \"\"\"\n \n def __init__(self):\n \"\"\" Constructor \"\"\"\n super().__init__()\n\n \n def set_parameters(self, pop_size, k, min_q, max_q, min_xi, max_xi, q_map_type, xi_map_type, function_evaluations_array):\n \"\"\" Define values for the parameters used by the algorithm \"\"\"\n # Input error checking\n if min_xi > max_xi or min_q > min_q:\n print('Error, maximum parameters must be greater than minimum ones')\n exit(-1)\n if min_xi <= 0 or min_q <= 0:\n print('Error, minimum parameters must be greater than zero')\n exit(-1)\n if not isinstance(q_map_type, str) or not isinstance(xi_map_type, str):\n print('Error, maps from success rate to parameters must be strings')\n exit(-1)\n if (q_map_type != 'lin' and q_map_type != 'sig' and q_map_type != 'exp') or (xi_map_type != 'lin' and xi_map_type != 'sig' and xi_map_type != 'exp'):\n print('Error, map types must be \\'lin\\', \\'sig\\' or \\'exp\\'')\n exit(-1)\n if (q_map_type == 'sig' and max_q >= self.sig_K) or (xi_map_type == 'sig' and max_xi >= self.sig_K):\n print('Error, maximum parameters value must be lesser than sigmoid K = ' + str(self.sig_K))\n \n # Parameter setting from ACOr class\n super().set_parameters(pop_size, k, max_q, max_xi, function_evaluations_array)\n\n # Parameterize xi control curve\n self.min['xi'] = min_xi\n self.max['xi'] = max_xi\n self.map_type['xi'] = xi_map_type\n self.parameterize_map('xi')\n # Parameterize q control curve\n self.min['q'] = min_q\n self.max['q'] = max_q\n self.map_type['q'] = q_map_type\n self.parameterize_map('q')\n \n \n def control_xi(self):\n \"\"\" Use population success rate to update Xi \"\"\"\n if self.success_rate == None:\n print(\"Error, first compute success rate\")\n exit(-1)\n \n # Compute new xi\n self.xi = self.evaluate_map('xi', (1 - self.success_rate))\n \n \n def control_q(self):\n \"\"\" Use population success rate to update Xi \"\"\"\n if self.success_rate == None:\n print(\"Error, first compute success rate\")\n exit(-1)\n \n # Compute new q\n self.q = self.evaluate_map('q', self.success_rate)\n "
] |
[
[
"numpy.abs",
"numpy.linspace",
"numpy.max",
"numpy.append",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
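
Illustrative sketch of the archive-weighting step used by the ACOr code in this entry: ranks 1..k receive Gaussian weights with mean 1 and standard deviation q*k, and a guide solution is drawn with probability proportional to its weight. The parameter values below are the class defaults quoted above, and np.random.choice stands in for the _biased_selection roulette-wheel helper.

    import math
    import numpy as np

    k, q = 50, 0.01                      # archive size and locality parameter
    x = np.linspace(1, k, k)             # solution ranks, 1 = best
    std = q * k
    w = np.exp(-0.5 * ((x - 1.0) / std) ** 2) / (std * math.sqrt(2.0 * math.pi))
    p = w / w.sum()                      # selection probabilities ("roulette wheel")

    guide = np.random.choice(k, p=p)     # index of the solution an ant samples around
    print(p[:3], guide)
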
rd11490/owl-map-score-added
|
[
"80ce7e6a08d015a8890253ef2f31fd67213a7868"
] |
[
"map_score.py"
] |
[
"import pandas as pd\nfrom utils.constants import Maps, total_escort_map_distance, total_map_time, calc_map_type, time_to_add\nfrom utils.utils import calc_match_date, calc_season\n\n# Some readability options for pandas print statements\npd.set_option('display.max_columns', 500)\npd.set_option('display.max_rows', 200)\npd.set_option('display.width', 1000)\n\n# Read in our match_map_stats csv file\nframe = pd.read_csv('./map_data/match_map_stats.csv')\n\n# Determine the map type, match date, and season for every map played\nframe['map_type'] = frame['map_name'].apply(calc_map_type)\nframe['match_date'] = frame['round_end_time'].apply(calc_match_date)\nframe['season'] = frame['round_end_time'].apply(calc_season)\n\n# Split our dataframe into four different frames. Each frame will only contain maps for it's specific map type\nescort_maps = frame[frame['map_type'] == Maps.Escort]\nassault_maps = frame[frame['map_type'] == Maps.Assault]\ncontrol_maps = frame[frame['map_type'] == Maps.Control]\nhybrid_maps = frame[frame['map_type'] == Maps.Hybrid]\n\n\n###############################\n# Calculate Assault map score #\n###############################\n# The basic idea behind our calculation for map score is\n# \"How many times could you complete the map in at the rate at which you initially completed the map\".\n# Unfortunately OWL data does not give us partial capture percentage, so we only get an integer N\n# which represents how many control points a team captured.\ndef calculate_assault_map_score(group):\n # I am limiting this analysis to the intial map parameters (2 rounds) and ignore any tie breaker/overtime scenarios.\n row = group[group['map_round'] == 2]\n # There is some old (bad) data in the dataset that needs to be cleaned. This line cleans that for us.\n if row.empty:\n row = group[group['map_round'] == group['map_round'].max()]\n\n # Break out attacker and defender into team 1 and team 2\n team_one = row['attacker'].values[0]\n team_two = row['defender'].values[0]\n\n # Pull out the number of points each team captured\n team_one_points = row['attacker_round_end_score'].values[0]\n team_two_points = row['defender_round_end_score'].values[0]\n\n # Pull out the amount of team each team banked if they completed the map\n team_one_time_banked = row['attacker_time_banked'].values[0]\n team_two_time_banked = row['defender_time_banked'].values[0]\n\n # When determining how much time each team had available we need to pull out the number of points they captured.\n # We can calculate that based on the rule set for the map type.\n # For Assault: 4 Minutes to attack point 1, an additional 4 minutes to attack point 2\n team_one_points_for_time = team_one_points\n team_two_points_for_time = team_two_points\n\n # There is an important exception here. 
If the winning team does not full cap the map, the number of points\n # they are given credit for is 1 more than they had actually capped.\n # We need to subtract that additional point from their score to properly account for how much time the team used.\n # Because we are always taking the second row (after both teams have attacked)\n # we do not need to account for time banked if team 1 is the winner as they are the second attacker.\n if row['map_winner'].values[0] == team_one:\n team_one_points_for_time -= 1\n elif row['map_winner'].values[0] == team_two and team_two_time_banked > 0.0:\n team_two_points_for_time -= 1\n\n team_one_total_time = total_map_time(Maps.Assault, team_one_points_for_time)\n team_two_total_time = total_map_time(Maps.Assault, team_two_points_for_time)\n\n # Now that we know how much time each team had to attack, how much time they banked,\n # and how many points they captured, we can calculate their cap rate.\n team_one_rate = team_one_points / (team_one_total_time - team_one_time_banked)\n team_two_rate = team_two_points / (team_two_total_time - team_two_time_banked)\n\n # If the team banked time, we want to give them credit for it. We do this by applying their cap rate to their banked\n # time to estimate how many points they could have capped if they kept their current rate.\n if team_one_time_banked > 0.0:\n team_one_score = (team_one_rate * team_one_time_banked) + team_one_points\n else:\n team_one_score = team_one_points\n\n if team_two_time_banked > 0.0:\n team_two_score = (team_two_rate * team_two_time_banked) + team_two_points\n else:\n team_two_score = team_two_points\n\n # Finally we want to divide each team's score by the total number of possible points in order to get a\n # % map completion estimate.\n team_one_score = team_one_score / 2\n team_two_score = team_two_score / 2\n\n return pd.Series({\n 'map_name': row['map_name'].values[0],\n 'map_type': row['map_type'].values[0],\n 'map_winner': row['map_winner'].values[0],\n 'match_date': row['match_date'].values[0],\n 'team_one_name': team_one,\n 'team_two_name': team_two,\n 'team_one_score': team_one_score * 100,\n 'team_two_score': team_two_score * 100,\n 'season': row['season'].values[0]\n })\n\n\n###############################\n# Calculate Payload map score #\n###############################\n# The basic idea behind our calculation for map score for escort and hybrid maps is\n# \"What percentage of an escort map could a team complete at the rate at which they pushed the payload initially\".\n# We can do this by calculating the total distance the payload traveled, add any additional distance using\n# the time banked and the rate at which the team pushed the payload, and dividing by the total distance for the map.\n\ndef calculate_payload_map_score(group):\n # I am limiting this analysis to the intial map parameters (2 rounds) and ignore any tie breaker/overtime scenarios.\n row = group[group['map_round'] == 2]\n # There is some old (bad) data in the dataset that needs to be cleaned. 
This line cleans that for us.\n if row.empty:\n row = group[group['map_round'] == group['map_round'].max()]\n\n # Pull out the map name\n map_name = row['map_name'].values[0]\n\n # Break out attacker and defender into team 1 and team 2\n team_one = row['attacker'].values[0]\n team_two = row['defender'].values[0]\n\n # Pull out how many points each team was given credit for capping\n team_one_points = row['attacker_round_end_score'].values[0]\n team_two_points = row['defender_round_end_score'].values[0]\n\n # Pull out how much time each team banked if they finished the map\n team_one_time_banked = row['attacker_time_banked'].values[0]\n team_two_time_banked = row['defender_time_banked'].values[0]\n\n # pull out how much distance each team traveled past their final capture point (if they did not complete the map)\n team_one_distance = row['attacker_payload_distance'].values[0]\n team_two_distance = row['defender_payload_distance'].values[0]\n\n # There is an important exception here. If the winning team does not full cap the map, the number of points\n # they are given credit for is 1 more than they had actually capped.\n # We need to subtract that additional point to properly account for how much time the team used\n # and how far they actually pushed the payload. We also need to account for the case of a tie. If the team full\n # caps the map, we do not need to include that value as we are already\n # adding the distance the team traveled at that point.\n team_one_points_for_distance = team_one_points\n team_one_points_for_time = team_one_points\n\n team_two_points_for_distance = team_two_points\n team_two_points_for_time = team_two_points\n\n if team_one_points == 3:\n team_one_points_for_distance -= 1\n if team_two_points == 3:\n team_two_points_for_distance -= 1\n\n # We need to account for hybrid maps where a team full holds and then wins by capping first.\n # We also need to account for when team 1 prevents team two from finishing the map and then wins by\n # matching distance\n if (team_one_points == 1 and team_one_distance == 0) or (row['map_winner'].values[0] == team_one):\n team_one_points_for_time -= 1\n\n if (team_two_points == 1 and team_two_distance == 0) or (\n row['map_winner'].values[0] == team_two and team_two_time_banked > 0.0):\n team_two_points_for_time -= 1\n\n # We add the distance up until the previous capped point and the distance traveled at the current point together\n # to get the total distance traveled\n team_one_total_distance = total_escort_map_distance(map_name, team_one_points_for_distance) + team_one_distance\n team_two_total_distance = total_escort_map_distance(map_name, team_two_points_for_distance) + team_two_distance\n\n # We need to calculate the total amount of time each team had on their push.\n team_one_total_time = total_map_time(Maps.Escort, team_one_points_for_time)\n team_two_total_time = total_map_time(Maps.Escort, team_two_points_for_time)\n\n # Calculate the rate at which the attacking team pushed the payload\n team_one_rate = team_one_total_distance / (team_one_total_time - team_one_time_banked)\n team_two_rate = team_two_total_distance / (team_two_total_time - team_two_time_banked)\n\n # If the team banked time, we want to give them credit for it. 
We do this by applying their cap rate to their banked\n # time to estimate how much farther they could have pushed the payload had they continued at their current rate.\n if team_one_time_banked > 0.0:\n team_one_score = team_one_total_distance + (team_one_rate * team_one_time_banked)\n else:\n team_one_score = team_one_total_distance\n\n if team_two_time_banked > 0.0:\n team_two_score = team_two_total_distance + (team_two_rate * team_two_time_banked)\n else:\n team_two_score = team_two_total_distance\n\n # Finally we normalize by total map distance in order to get to map completion percentage\n total_map_distance = total_escort_map_distance(map_name, 3)\n team_one_score = team_one_score / total_map_distance\n team_two_score = team_two_score / total_map_distance\n\n return pd.Series({\n 'map_name': map_name,\n 'map_type': row['map_type'].values[0],\n 'map_winner': row['map_winner'].values[0],\n 'match_date': row['match_date'].values[0],\n 'team_one_name': team_one,\n 'team_two_name': team_two,\n 'team_one_score': team_one_score * 100,\n 'team_two_score': team_two_score * 100,\n 'season': row['season'].values[0]\n })\n\n\n###############################\n# Calculate control map score #\n###############################\n# Controls maps are \"easy\" to score because each team is able to get a control percentage.\n# Convert the percentage to a decimal and use it as the map score\ndef calculate_control_map_score(group):\n # Break out attacker and defender into team 1 and team 2\n team_one = group['attacker'].values[0]\n team_two = group['defender'].values[0]\n\n # Pull out how many points each team was given credit for capping\n team_one_score = group['attacker_control_perecent'].sum()/2\n team_two_score = group['defender_control_perecent'].sum()/2\n\n\n return pd.Series({\n 'map_name': group['map_name'].values[0],\n 'map_type': group['map_type'].values[0],\n 'map_winner': group['map_winner'].values[0],\n 'match_date': group['match_date'].values[0],\n 'team_one_name': team_one,\n 'team_two_name': team_two,\n 'team_one_score': team_one_score,\n 'team_two_score': team_two_score,\n 'season': group['season'].values[0]\n })\n# Finally we need to apply our scoring functions to each dataframe of map types,\n# and merge them all back together as a frame of scored maps\ncontrol_maps_score = control_maps.groupby(by=['match_id', 'game_number']).apply(calculate_control_map_score).reset_index()\nassault_scores = assault_maps.groupby(by=['match_id', 'game_number']).apply(calculate_assault_map_score).reset_index()\nescort_maps_score = escort_maps.groupby(by=['match_id', 'game_number']).apply(calculate_payload_map_score).reset_index()\nhybrid_maps_score = hybrid_maps.groupby(by=['match_id', 'game_number']).apply(calculate_payload_map_score).reset_index()\n\nscored_maps = pd.concat([control_maps_score, hybrid_maps_score, escort_maps_score, assault_scores])\n\nscored_maps.to_csv('results/scored_maps.csv', index=False)\n\nprint(scored_maps)\n"
] |
[
[
"pandas.set_option",
"pandas.read_csv",
"pandas.concat",
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
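
Worked example of the assault-map scoring idea implemented in map_score.py above: the cap rate over the time actually used is extrapolated onto banked time, then normalized by the two capturable points. The 480-second figure assumes the 4-minutes-per-point rule quoted in the file's comments; times are in seconds.

    def assault_score(points_capped, total_attack_time_s, time_banked_s, total_points=2):
        # Points per second over the time the team actually used on attack.
        rate = points_capped / (total_attack_time_s - time_banked_s)
        # Credit banked time at that rate, as in calculate_assault_map_score.
        credited = points_capped + (rate * time_banked_s if time_banked_s > 0 else 0.0)
        return 100.0 * credited / total_points  # % map completion estimate

    # Full cap (2 points) with 120 s banked out of the 480 s available:
    print(assault_score(2, 480, 120))  # ~133.3; scores above 100 are possible by construction
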
iaqos/ancona
|
[
"f9beefb966c2c98920bc7309d3b52df929082312"
] |
[
"generator.py"
] |
[
"#!/usr/bin/env python3\n# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)\n\"\"\"\n\n\nimport argparse\nimport logging\nimport json\nimport numpy as np\nimport torch\n\nfrom transformers import (\n CTRLLMHeadModel,\n CTRLTokenizer,\n GPT2LMHeadModel,\n GPT2Tokenizer,\n OpenAIGPTLMHeadModel,\n OpenAIGPTTokenizer,\n TransfoXLLMHeadModel,\n TransfoXLTokenizer,\n XLMTokenizer,\n XLMWithLMHeadModel,\n XLNetLMHeadModel,\n XLNetTokenizer,\n)\n\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\nMAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop\n\nMODEL_CLASSES = {\n \"gpt2\": (GPT2LMHeadModel, GPT2Tokenizer),\n \"ctrl\": (CTRLLMHeadModel, CTRLTokenizer),\n \"openai-gpt\": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n \"xlnet\": (XLNetLMHeadModel, XLNetTokenizer),\n \"transfo-xl\": (TransfoXLLMHeadModel, TransfoXLTokenizer),\n \"xlm\": (XLMWithLMHeadModel, XLMTokenizer),\n}\n\n# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia\n# in https://github.com/rusiaaman/XLNet-gen#methodology\n# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e\nPADDING_TEXT = \"\"\"In 1991, the remains of Russian Tsar Nicholas II and his family\n(except for Alexei and Maria) are discovered.\nThe voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the\nremainder of the story. 1883 Western Siberia,\na young Grigori Rasputin is asked by his father and a group of men to perform magic.\nRasputin has a vision and denounces one of the men as a horse thief. Although his\nfather initially slaps him for making such an accusation, Rasputin watches as the\nman is chased outside and beaten. Twenty years later, Rasputin sees a vision of\nthe Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,\nwith people, even a bishop, begging for his blessing. <eod> </s> <eos>\"\"\"\n\n\ndef set_seed(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\n#\n# Functions to prepare models' input\n#\n\n\ndef prepare_ctrl_input(args, _, tokenizer, prompt_text):\n if args.temperature > 0.7:\n logger.info(\"CTRL typically works better with lower temperatures (and lower top_k).\")\n\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)\n if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):\n logger.info(\"WARNING! 
You are not starting your generation from a control code so you won't get good results\")\n return prompt_text\n\n\ndef prepare_xlm_input(args, model, tokenizer, prompt_text):\n # kwargs = {\"language\": None, \"mask_token_id\": None}\n\n # Set the language\n use_lang_emb = hasattr(model.config, \"use_lang_emb\") and model.config.use_lang_emb\n if hasattr(model.config, \"lang2id\") and use_lang_emb:\n available_languages = model.config.lang2id.keys()\n if args.xlm_language in available_languages:\n language = args.xlm_language\n else:\n language = None\n while language not in available_languages:\n language = input(\"Using XLM. Select language in \" + str(list(available_languages)) + \" >>> \")\n\n model.config.lang_id = model.config.lang2id[language]\n # kwargs[\"language\"] = tokenizer.lang2id[language]\n\n # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers\n # XLM masked-language modeling (MLM) models need masked token\n # is_xlm_mlm = \"mlm\" in args.model_name_or_path\n # if is_xlm_mlm:\n # kwargs[\"mask_token_id\"] = tokenizer.mask_token_id\n\n return prompt_text\n\n\ndef prepare_xlnet_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text\n\n\ndef prepare_transfoxl_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text\n\n\nPREPROCESSING_FUNCTIONS = {\n \"ctrl\": prepare_ctrl_input,\n \"xlm\": prepare_xlm_input,\n \"xlnet\": prepare_xlnet_input,\n \"transfo-xl\": prepare_transfoxl_input,\n}\n\n\ndef adjust_length_to_model(length, max_sequence_length):\n if length < 0 and max_sequence_length > 0:\n length = max_sequence_length\n elif 0 < max_sequence_length < length:\n length = max_sequence_length # No generation bigger than model size\n elif length < 0:\n length = MAX_LENGTH # avoid infinite loop\n return length\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n\n parser.add_argument(\"--prompt\", type=str, default=\"\")\n parser.add_argument(\"--length\", type=int, default=20)\n parser.add_argument(\"--stop_token\", type=str, default=None, help=\"Token at which text generation is stopped\")\n\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"temperature of 1.0 has no effect, lower tend toward greedy sampling\",\n )\n parser.add_argument(\n \"--repetition_penalty\", type=float, default=1.0, help=\"primarily useful for CTRL model; in that case, use 1.2\"\n )\n parser.add_argument(\"--k\", type=int, default=0)\n parser.add_argument(\"--p\", type=float, default=0.9)\n\n parser.add_argument(\"--padding_text\", type=str, default=\"\", help=\"Padding text for Transfo-XL and XLNet.\")\n parser.add_argument(\"--xlm_language\", type=str, default=\"\", help=\"Optional language when used with the XLM model.\")\n\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n 
parser.add_argument(\"--num_return_sequences\", type=int, default=1, help=\"The number of samples to generate.\")\n parser.add_argument(\"--title\", type=str, default=\"\", help=\"The title of the article.\")\n args = parser.parse_args()\n\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n\n set_seed(args)\n\n # Initialize the model and tokenizer\n try:\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n except KeyError:\n raise KeyError(\"the model {} you specified is not supported. You are welcome to add it and open a PR :)\")\n\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n model = model_class.from_pretrained(args.model_name_or_path)\n model.to(args.device)\n\n args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)\n logger.info(args)\n\n prompt_text = args.prompt if args.prompt else input(\"Model prompt >>> \")\n\n # Different models need different input formatting and/or extra arguments\n requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys()\n if requires_preprocessing:\n prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)\n preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)\n encoded_prompt = tokenizer.encode(\n preprocessed_prompt_text, add_special_tokens=False, return_tensors=\"pt\", add_space_before_punct_symbol=True\n )\n else:\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors=\"pt\")\n encoded_prompt = encoded_prompt.to(args.device)\n\n if encoded_prompt.size()[-1] == 0:\n input_ids = None\n else:\n input_ids = encoded_prompt\n\n output_sequences = model.generate(\n input_ids=input_ids,\n max_length=args.length + len(encoded_prompt[0]),\n temperature=args.temperature,\n top_k=args.k,\n top_p=args.p,\n repetition_penalty=args.repetition_penalty,\n do_sample=True,\n num_return_sequences=args.num_return_sequences,\n )\n\n # Remove the batch dimension when returning multiple sequences\n if len(output_sequences.shape) > 2:\n output_sequences.squeeze_()\n\n generated_sequences = []\n print(args.title,\"\\n\")\n \n for generated_sequence_idx, generated_sequence in enumerate(output_sequences):\n print('\\n')#print(\"=== GENERATED SEQUENCE {} ===\".format(generated_sequence_idx + 1))\n generated_sequence = generated_sequence.tolist()\n\n # Decode text\n text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)\n\n # Remove all text after the stop token\n text = text[: text.find(args.stop_token) if args.stop_token else None]\n\n # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing\n total_sequence = (\n text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :] + \" [...]\"\n \n )\n\n generated_sequences.append(total_sequence)\n\n print(total_sequence)\n\n print_choice = input(\"Vuoi salvare l'articolo? Se sì, scrivi Y \")\n if print_choice == \"Y\":\n \n date = input(\"Inserisci la data in cui l'articolo sarà pubblicato, nel formato YYYY-MM-GG, es. 2020-12-31 \")\n id_num = input(\"Inserisci l'id \")\n titolo = args.title\n autore = input(\"Inserisci il nome dell'autore \")\n ordine = input(\"In che ordine vuoi gli articoli? 
Scrivi 231 per avere, nell'ordine, il secondo, il terzo e il primo \")\n sottotitolo = input(\"Inserisci il sottotitolo (è una frase o un pezzo di frase dall'articolo che vuoi compaia per primo) \")\n ord_art = [int(num) for num in ordine] #fare ordinamento articoli\n #print(ord_art)\n #print(type(generated_sequences), len(generated_sequences), \"\\n\", generated_sequences)\n article_dict = {\n \"id\": int(id_num),\n \"data\": date,\n \"titolo\": titolo,\n \"sottotitolo\": sottotitolo,\n \"articolo1\": generated_sequences[ord_art[0] - 1],\n \"articolo2\": generated_sequences[ord_art[1] - 1],\n \"articolo3\": generated_sequences[ord_art[2] - 1],\n \"autore\": autore\n }\n\n #article_json= json.dumps(article_dict)\n with open('{}.json'.format(date),'w', encoding='utf-8') as data:\n json.dump(article_dict, data, ensure_ascii=False)\n \n \n else:\n pass\n return generated_sequences\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
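
Minimal sketch of the sampling call made in generator.py above, reduced to the GPT-2 branch. It assumes the transformers and torch packages plus network access to download the "gpt2" checkpoint; the prompt string is a placeholder.

    import torch
    from transformers import GPT2LMHeadModel, GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    encoded_prompt = tokenizer.encode("The city of Ancona", add_special_tokens=False, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=encoded_prompt,
        max_length=40 + encoded_prompt.shape[-1],  # prompt length + new tokens, as in the script
        temperature=1.0,
        top_k=0,
        top_p=0.9,
        repetition_penalty=1.0,
        do_sample=True,
        num_return_sequences=1,
    )
    print(tokenizer.decode(output_sequences[0].tolist(), clean_up_tokenization_spaces=True))
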
Guoning-Chen/ssd.pytorch
|
[
"49c0e039bc3128ccc0176454059665a739d4e185"
] |
[
"layers/modules/multibox_loss.py"
] |
[
"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\nfrom ..box_utils import match, log_sum_exp\n\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,\n use_gpu=True):\n super(MultiBoxLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = cfg['variance']\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n num = loc_data.size(0)\n priors = priors[:loc_data.size(1), :]\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n conf_t = torch.LongTensor(num, num_priors)\n for idx in range(num):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n\n # 为解决一个bug交换了下面这两行\n loss_c = loss_c.view(num, -1)\n loss_c[pos] = 0 # filter out pos boxes for now\n\n _, loss_idx = loss_c.sort(1, descending=True)\n _, 
idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.smooth_l1_loss",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
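
Small self-contained sketch of the hard-negative-mining rank trick used in MultiBoxLoss.forward above: sorting the per-prior losses and then argsorting the resulting order gives each prior its rank, so only the highest-loss negatives (up to negpos_ratio times the number of positives, 3x here) are kept. The loss values below are toy numbers.

    import torch

    loss_c = torch.tensor([[0.2, 0.9, 0.1, 0.5, 0.7]])      # toy per-prior confidence losses, batch of 1
    pos = torch.tensor([[False, True, False, False, False]])  # which priors matched a ground-truth box
    loss_c[pos] = 0                                           # positives are excluded from mining

    _, loss_idx = loss_c.sort(1, descending=True)
    _, idx_rank = loss_idx.sort(1)                            # rank of each prior by loss (0 = hardest negative)
    num_pos = pos.long().sum(1, keepdim=True)
    num_neg = torch.clamp(3 * num_pos, max=pos.size(1) - 1)
    neg = idx_rank < num_neg.expand_as(idx_rank)
    print(neg)   # the 3 highest-loss negatives are selected
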
arita37/pyvtreat
|
[
"c32e7ce6db11a2ccdd63e545b25028cbec03a3ff"
] |
[
"pkg/build/lib/vtreat/vtreat_api.py"
] |
[
"import warnings\n\nimport pandas\nimport numpy\n\nimport vtreat.vtreat_impl as vtreat_impl\nimport vtreat.util\nimport vtreat.cross_plan\n\n\ndef vtreat_parameters(user_params=None):\n \"\"\"build a vtreat parameters dictionary, adding in user choices\"\"\"\n\n params = {\n \"use_hierarchical_estimate\": True,\n \"coders\": {\n \"clean_copy\",\n \"missing_indicator\",\n \"indicator_code\",\n \"impact_code\",\n \"deviation_code\",\n \"logit_code\",\n \"prevalence_code\",\n },\n \"filter_to_recommended\": True,\n \"indicator_min_fraction\": 0.1,\n \"cross_validation_plan\": vtreat.cross_plan.KWayCrossPlanYStratified(),\n \"cross_validation_k\": 5,\n \"user_transforms\": [],\n \"sparse_indicators\": True,\n \"missingness_imputation\": numpy.mean,\n \"check_for_duplicate_frames\": True,\n \"error_on_duplicate_frames\": False,\n \"retain_cross_plan\": True,\n \"tunable_params\": [\n \"indicator_min_fraction\"\n ],\n }\n pkeys = set(params.keys())\n if user_params is not None:\n for k in user_params.keys():\n if k not in pkeys:\n raise KeyError(\"parameter key \" + str(k) + \" not recognized\")\n params[k] = user_params[k]\n if params[\"error_on_duplicate_frames\"]:\n params[\"check_for_duplicate_frames\"] = True\n for k in params[\"tunable_params\"]:\n if k not in pkeys:\n raise KeyError(\"tunable_params key \" + str(k) + \" not recognized\")\n return params\n\n\ndef unsupervised_parameters(user_params=None):\n \"\"\"build a vtreat parameters dictionary for unsupervised tasks, adding in user choices\"\"\"\n\n params = {\n \"coders\": {\n \"clean_copy\",\n \"missing_indicator\",\n \"indicator_code\",\n \"prevalence_code\",\n },\n \"indicator_min_fraction\": 0.0,\n \"user_transforms\": [],\n \"sparse_indicators\": True,\n \"missingness_imputation\": numpy.mean,\n \"tunable_params\": [\n \"indicator_min_fraction\"\n ],\n }\n pkeys = set(params.keys())\n if user_params is not None:\n for k in user_params.keys():\n if k not in pkeys:\n raise KeyError(\"parameter key \" + str(k) + \" not recognized\")\n params[k] = user_params[k]\n for k in params[\"tunable_params\"]:\n if k not in pkeys:\n raise KeyError(\"tunable_params key \" + str(k) + \" not recognized\")\n return params\n\n\nclass NumericOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a numeric outcome (regression)\"\"\"\n\n def __init__(\n self, *,\n var_list=None,\n outcome_name=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or touple of column names\n :param outcome_name: name of column containing dependent variable\n :param cols_to_copy: list or touple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data 
used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n res = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=res, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n y = vtreat.util.safe_to_numeric_array(y)\n if vtreat.util.is_bad(y).sum() > 0:\n raise ValueError(\"y should not have any missing/NA/NaN values\")\n if numpy.max(y) <= numpy.min(y):\n raise ValueError(\"y does not vary\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.plan_ = vtreat_impl.fit_numeric_outcome_treatment(\n X=X,\n y=y,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n # patch in cross-frame versions of complex columns such as impact\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n self.score_frame_ = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=y,\n plan=self.plan_,\n params=self.params_,\n is_classification=False\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n cross_frame, res_columns = 
vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass BinomialOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a target outcome (binomial classification)\"\"\"\n\n def __init__(\n self,\n *,\n var_list=None,\n outcome_name=None,\n outcome_target=True,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or touple of column names\n :param outcome_name: name of column containing dependent variable\n :param outcome_target: value of outcome to consider \"positive\"\n :param cols_to_copy: list or touple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n outcome_target=outcome_target,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n y_mean = numpy.mean(y == self.outcome_target_)\n if y_mean <= 0 or y_mean >= 1:\n raise ValueError(\"y==outcome_target does not vary\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = 
y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.plan_ = vtreat_impl.fit_binomial_outcome_treatment(\n X=X,\n y=y,\n outcome_target=self.outcome_target_,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n # patch in cross-frame versions of complex columns such as impact\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n self.score_frame_ = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=numpy.asarray(\n numpy.asarray(y) == self.outcome_target_, dtype=float\n ),\n plan=self.plan_,\n params=self.params_,\n is_classification=True\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n cross_frame, res_columns = vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass MultinomialOutcomeTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage a treatment plan for a set of outcomes (multinomial classification)\"\"\"\n\n def __init__(\n self,\n *,\n var_list=None,\n outcome_name=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None,\n ):\n \"\"\"\n\n :param var_list: list or touple of column names\n :param outcome_name: name of column containing dependent variable\n :param cols_to_copy: list or touple of column names\n :param params: vtreat.vtreat_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=outcome_name,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n self.outcomes_ = None\n\n def merge_params(self, p):\n return vtreat_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n if self.params_['check_for_duplicate_frames'] and (self.last_fit_x_id_ == vtreat.util.hash_data_frame(X)):\n if self.params_[\"error_on_duplicate_frames\"]:\n raise ValueError(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use 
fit_transform() instead)\")\n warnings.warn(\n \"possibly called transform on same data used to fit\\n\" +\n \"(this causes over-fit, please use fit_transform() instead)\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is None:\n if self.outcome_name_ is None:\n raise ValueError(\".fit_transform(X) must have outcome_name set\")\n y = numpy.asarray(X[self.outcome_name_])\n else:\n y = numpy.asarray(y)\n if (self.outcome_name_ is not None) and (self.outcome_name_ in X.columns):\n if not numpy.all(X[self.outcome_name_] == y):\n raise ValueError(\".fit_transform(X, y) called with y != X[outcome_name]\")\n if not X.shape[0] == len(y):\n raise ValueError(\"X.shape[0] should equal len(y)\")\n if len(numpy.unique(y)) <= 1:\n raise ValueError(\"y must take on at least 2 values\")\n cross_rows = None\n cross_plan = None\n if self.params_['retain_cross_plan']:\n cross_rows = self.cross_rows_\n cross_plan = self.cross_plan_\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n if isinstance(y, pandas.Series):\n y = y.reset_index(inplace=False, drop=True)\n # model for independent transforms\n self.plan_ = None\n self.score_frame_ = None\n self.outcomes_ = numpy.unique(y)\n self.plan_ = vtreat_impl.fit_multinomial_outcome_treatment(\n X=X,\n y=y,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n if (cross_plan is None) or (cross_rows != X.shape[0]):\n if cross_plan is not None:\n warnings.warn(\"Number of rows different than previous fit with retain_cross_plan==True\")\n cross_plan = self.params_[\"cross_validation_plan\"].split_plan(\n n_rows=X.shape[0], k_folds=self.params_[\"cross_validation_k\"], data=X, y=y\n )\n cross_rows = X.shape[0]\n cross_frame = vtreat_impl.cross_patch_refit_y_aware_cols(\n x=X, y=y, res=res, plan=self.plan_, cross_plan=cross_plan\n )\n cross_frame = vtreat_impl.cross_patch_user_y_aware_cols(\n x=cross_frame,\n y=y,\n res=res,\n params=self.params_,\n cross_plan=cross_plan,\n )\n # use cross_frame to compute variable effects\n\n def si(oi):\n sf = vtreat_impl.score_plan_variables(\n cross_frame=cross_frame,\n outcome=numpy.asarray(numpy.asarray(y) == oi, dtype=float),\n plan=self.plan_,\n params=self.params_,\n is_classification=True\n )\n sf[\"outcome_target\"] = oi\n return sf\n\n score_frames = [si(oi) for oi in self.outcomes_]\n self.score_frame_ = pandas.concat(score_frames, axis=0)\n self.score_frame_.reset_index(inplace=True, drop=True)\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n cross_frame = vtreat_impl.limit_to_appropriate_columns(\n res=cross_frame, transform=self\n )\n 
cross_frame, res_columns = vtreat_impl.back_to_orig_type_data_frame(cross_frame, orig_type)\n self.last_result_columns = res_columns\n if self.params_['retain_cross_plan']:\n self.cross_plan_ = cross_plan\n self.cross_rows_ = cross_rows\n else:\n self.cross_plan_ = None\n self.cross_rows_ = None\n return cross_frame\n\n\nclass UnsupervisedTreatment(vtreat_impl.VariableTreatment):\n \"\"\"manage an unsupervised treatment plan\"\"\"\n\n def __init__(self,\n *,\n var_list=None,\n cols_to_copy=None,\n params=None,\n imputation_map=None):\n \"\"\"\n\n :param var_list: list or touple of column names\n :param cols_to_copy: list or touple of column names\n :param params: vtreat.unsupervised_parameters()\n :param imputation_map: map of column names to custom missing imputation values or functions\n \"\"\"\n params = self.merge_params(params)\n vtreat_impl.VariableTreatment.__init__(\n self,\n var_list=var_list,\n outcome_name=None,\n cols_to_copy=cols_to_copy,\n params=params,\n imputation_map=imputation_map,\n )\n\n def merge_params(self, p):\n return unsupervised_parameters(p)\n\n # noinspection PyPep8Naming\n def transform(self, X):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if self.last_fit_x_id_ is None:\n raise ValueError(\"called transform on not yet fit treatment\")\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n\n # noinspection PyPep8Naming\n def fit_transform(self, X, y=None, **fit_params):\n X, orig_type = vtreat_impl.ready_data_frame(X)\n self.check_column_names(X.columns)\n if y is not None:\n raise ValueError(\"y should be None\")\n self.clear()\n self.last_fit_x_id_ = vtreat.util.hash_data_frame(X)\n X = vtreat_impl.pre_prep_frame(\n X, col_list=self.var_list_, cols_to_copy=self.cols_to_copy_\n )\n self.plan_ = vtreat_impl.fit_unsupervised_treatment(\n X=X,\n var_list=self.var_list_,\n outcome_name=self.outcome_name_,\n cols_to_copy=self.cols_to_copy_,\n params=self.params_,\n imputation_map=self.imputation_map_,\n )\n res = vtreat_impl.perform_transform(x=X, transform=self, params=self.params_)\n self.score_frame_ = vtreat_impl.pseudo_score_plan_variables(\n cross_frame=res, plan=self.plan_, params=self.params_\n )\n if (\"filter_to_recommended\" in self.params_.keys()) and self.params_[\"filter_to_recommended\"]:\n self.set_result_restriction(\n set([ci for ci in self.score_frame_[\"variable\"][self.score_frame_[\"recommended\"]]]))\n res = vtreat_impl.limit_to_appropriate_columns(res=res, transform=self)\n res, res_columns = vtreat_impl.back_to_orig_type_data_frame(res, orig_type)\n self.last_result_columns = res_columns\n return res\n"
] |
[
[
"pandas.concat",
"numpy.unique",
"numpy.asarray",
"numpy.min",
"numpy.all",
"numpy.max",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
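The calls listed for this vtreat row are plain numpy/pandas operations. A minimal sketch of how the prevalence guard and the per-outcome score-frame stacking in the code above fit together; the toy `y` and the columns of the small frame are assumptions for illustration, not data from the file:

```python
import numpy
import pandas

y = numpy.asarray(["a", "b", "a", "c"])
outcome_target = "a"

# guard used before fitting: the chosen target must actually vary
y_mean = numpy.mean(y == outcome_target)
assert 0 < y_mean < 1

# one small frame per outcome level, stacked the way the multinomial fit_transform stacks score frames
frames = []
for oi in numpy.unique(y):
    sf = pandas.DataFrame({"outcome_target": [oi], "prevalence": [numpy.mean(y == oi)]})
    frames.append(sf)
score_frame = pandas.concat(frames, axis=0).reset_index(drop=True)
print(score_frame)
```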
dmadeka/ray
|
[
"4f8e100fe0417da4fe1098defbfa478088502244"
] |
[
"python/ray/experimental/sgd/pytorch/utils.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nfrom contextlib import closing\nimport numpy as np\nimport socket\nimport time\nimport torch\nimport torch.nn as nn\n\n\ndef train(train_iterator, model, criterion, optimizer):\n \"\"\"Runs 1 training epoch\"\"\"\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n timers = {k: TimerStat() for k in [\"d2h\", \"fwd\", \"grad\", \"apply\"]}\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for i, (features, target) in enumerate(train_iterator):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Create non_blocking tensors for distributed training\n with timers[\"d2h\"]:\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n with timers[\"fwd\"]:\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n with timers[\"grad\"]:\n # compute gradients in a backward pass\n optimizer.zero_grad()\n loss.backward()\n\n with timers[\"apply\"]:\n # Call step of optimizer to update model params\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n stats = {\n \"batch_time\": batch_time.avg,\n \"batch_processed\": losses.count,\n \"train_loss\": losses.avg,\n \"data_time\": data_time.avg,\n }\n stats.update({k: t.mean for k, t in timers.items()})\n return stats\n\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (features, target) in enumerate(val_loader):\n\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n stats = {\"batch_time\": batch_time.avg, \"validation_loss\": losses.avg}\n return stats\n\n\nclass TimerStat(object):\n \"\"\"A running stat for conveniently logging the duration of a code block.\n\n Note that this class is *not* thread-safe.\n\n Examples:\n Time a call to 'time.sleep'.\n\n >>> import time\n >>> sleep_timer = TimerStat()\n >>> with sleep_timer:\n ... 
time.sleep(1)\n >>> round(sleep_timer.mean)\n 1\n \"\"\"\n\n def __init__(self, window_size=10):\n self._window_size = window_size\n self._samples = []\n self._units_processed = []\n self._start_time = None\n self._total_time = 0.0\n self.count = 0\n\n def __enter__(self):\n assert self._start_time is None, \"concurrent updates not supported\"\n self._start_time = time.time()\n\n def __exit__(self, type, value, tb):\n assert self._start_time is not None\n time_delta = time.time() - self._start_time\n self.push(time_delta)\n self._start_time = None\n\n def push(self, time_delta):\n self._samples.append(time_delta)\n if len(self._samples) > self._window_size:\n self._samples.pop(0)\n self.count += 1\n self._total_time += time_delta\n\n def push_units_processed(self, n):\n self._units_processed.append(n)\n if len(self._units_processed) > self._window_size:\n self._units_processed.pop(0)\n\n @property\n def mean(self):\n return np.mean(self._samples)\n\n @property\n def median(self):\n return np.median(self._samples)\n\n @property\n def sum(self):\n return np.sum(self._samples)\n\n @property\n def max(self):\n return np.max(self._samples)\n\n @property\n def first(self):\n return self._samples[0] if self._samples else None\n\n @property\n def last(self):\n return self._samples[-1] if self._samples else None\n\n @property\n def size(self):\n return len(self._samples)\n\n @property\n def mean_units_processed(self):\n return float(np.mean(self._units_processed))\n\n @property\n def mean_throughput(self):\n time_total = sum(self._samples)\n if not time_total:\n return 0.0\n return sum(self._units_processed) / time_total\n\n def reset(self):\n self._samples = []\n self._units_processed = []\n self._start_time = None\n self._total_time = 0.0\n self.count = 0\n\n\ndef find_free_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind((\"\", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass Resources(\n namedtuple(\"Resources\", [\"num_cpus\", \"num_gpus\", \"resources\"])):\n __slots__ = ()\n\n def __new__(cls, num_cpus=1, num_gpus=0, resources=None):\n if resources is None:\n resources = {}\n\n return super(Resources, cls).__new__(cls, num_cpus, num_gpus,\n resources)\n\n\ndef sgd_mse_optimizer(model, config):\n \"\"\"Returns the mean squared error criterion and SGD optimizer.\n\n Args:\n model (torch.nn.Module): the model to optimize.\n config (dict): configuration for the optimizer.\n lr (float): the learning rate. defaults to 0.01.\n \"\"\"\n learning_rate = config.get(\"lr\", 0.01)\n criterion = nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n return criterion, optimizer\n"
] |
[
[
"torch.nn.MSELoss",
"numpy.median",
"numpy.max",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
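The numpy calls listed for this row back the rolling-window statistics of the `TimerStat` helper above. A minimal sketch of that pattern, with an arbitrary window size and a `time.sleep` standing in for the timed block:

```python
import time
import numpy as np

window_size = 10
samples = []

for _ in range(3):
    start = time.time()
    time.sleep(0.01)                # stand-in for the code block being timed
    samples.append(time.time() - start)
    if len(samples) > window_size:  # keep only the most recent measurements
        samples.pop(0)

print("mean:", np.mean(samples), "median:", np.median(samples), "max:", np.max(samples))
```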
haydarai/dagster
|
[
"9b9c78e332f976f196d17a38c9840f53679d94cd"
] |
[
"python_modules/libraries/dagster-dbt/dagster_dbt/rpc/solids.py"
] |
[
"import json\nimport time\nfrom typing import Callable, Iterator, Optional\n\nimport pandas as pd\nfrom dagster_pandas import DataFrame\n\nfrom dagster import (\n Array,\n AssetMaterialization,\n Bool,\n DagsterInvalidDefinitionError,\n EventMetadataEntry,\n Failure,\n Field,\n InputDefinition,\n Int,\n Noneable,\n Nothing,\n Output,\n OutputDefinition,\n Permissive,\n RetryRequested,\n String,\n check,\n solid,\n)\nfrom dagster.core.execution.context.compute import SolidExecutionContext\n\nfrom ..errors import DagsterDbtRpcUnexpectedPollOutputError\nfrom .types import DbtRpcOutput\nfrom .utils import log_rpc, raise_for_rpc_error\n\n\ndef _generate_materializations(dro: DbtRpcOutput) -> Iterator[AssetMaterialization]:\n \"\"\"Yields ``AssetMaterializations`` for metadata in the dbt RPC ``DbtRpcOutput``.\"\"\"\n for node_result in dro.result.results:\n if node_result.node[\"resource_type\"] in [\"model\", \"snapshot\"]:\n success = not node_result.fail and not node_result.skip and not node_result.error\n if success:\n entries = [\n EventMetadataEntry.json(data=node_result.node, label=\"Node\"),\n EventMetadataEntry.text(text=str(node_result.status), label=\"Status\"),\n EventMetadataEntry.text(\n text=str(node_result.execution_time), label=\"Execution Time\"\n ),\n EventMetadataEntry.text(\n text=node_result.node[\"config\"][\"materialized\"],\n label=\"Materialization Strategy\",\n ),\n EventMetadataEntry.text(text=node_result.node[\"database\"], label=\"Database\"),\n EventMetadataEntry.text(text=node_result.node[\"schema\"], label=\"Schema\"),\n EventMetadataEntry.text(text=node_result.node[\"alias\"], label=\"Alias\"),\n EventMetadataEntry.text(\n text=node_result.node[\"description\"], label=\"Description\"\n ),\n ]\n for step_timing in node_result.step_timings:\n if step_timing.name == \"execute\":\n execution_entries = [\n EventMetadataEntry.text(\n text=step_timing.started_at.isoformat(timespec=\"seconds\"),\n label=\"Execution Started At\",\n ),\n EventMetadataEntry.text(\n text=step_timing.completed_at.isoformat(timespec=\"seconds\"),\n label=\"Execution Completed At\",\n ),\n EventMetadataEntry.text(\n text=str(step_timing.duration), label=\"Execution Duration\"\n ),\n ]\n entries.extend(execution_entries)\n if step_timing.name == \"compile\":\n execution_entries = [\n EventMetadataEntry.text(\n text=step_timing.started_at.isoformat(timespec=\"seconds\"),\n label=\"Compilation Started At\",\n ),\n EventMetadataEntry.text(\n text=step_timing.completed_at.isoformat(timespec=\"seconds\"),\n label=\"Compilation Completed At\",\n ),\n EventMetadataEntry.text(\n text=str(step_timing.duration), label=\"Compilation Duration\"\n ),\n ]\n entries.extend(execution_entries)\n\n yield AssetMaterialization(\n description=\"A materialized node within the dbt graph.\",\n metadata_entries=entries,\n asset_key=node_result.node[\"unique_id\"],\n )\n\n\ndef _poll_rpc(\n context: SolidExecutionContext, request_token: str, should_yield_materializations: bool = True\n) -> DbtRpcOutput:\n \"\"\"Polls the dbt RPC server for the status of a request until the state is ``success``.\"\"\"\n logs_start = 0\n while True:\n # Poll for the dbt RPC request.\n context.log.debug(f\"RequestToken: {request_token}\")\n resp = context.resources.dbt_rpc.poll(\n request_token=request_token, logs=context.solid_config[\"logs\"], logs_start=logs_start\n )\n raise_for_rpc_error(context, resp)\n\n # Pass dbt RPC logs into the Dagster/Dagit logger.\n if context.solid_config[\"logs\"]:\n logs = 
resp.json().get(\"result\").get(\"logs\")\n if len(logs) > 0:\n log_rpc(context, logs)\n logs_start += len(logs)\n\n # Stop polling if request's state is no longer \"running\".\n if resp.json().get(\"result\").get(\"state\") != \"running\":\n break\n\n # Sleep for the configured time intervale before polling again.\n context.log.debug(\n f\"Request {request_token} currently in state '{resp.json().get('result').get('state')}' (elapsed time {resp.json().get('result').get('elapsed', 0)} seconds). Sleeping for {context.solid_config.get('interval')}s..\"\n )\n time.sleep(context.solid_config[\"interval\"])\n\n if resp.json().get(\"result\").get(\"state\") != \"success\":\n raise Failure(\n description=f\"Request {request_token} finished with state '{resp.json().get('result').get('state')}' in {resp.json().get('result').get('elapsed')} seconds\",\n )\n\n context.log.info(\n f\"Request {request_token} finished with state '{resp.json().get('result').get('state')}' in {resp.json().get('result').get('elapsed')} seconds\"\n )\n context.log.debug(json.dumps(resp.json().get(\"result\"), indent=2))\n\n polled_run_results = DbtRpcOutput.from_dict(resp.json().get(\"result\"))\n\n if should_yield_materializations:\n for materialization in _generate_materializations(polled_run_results):\n yield materialization\n\n yield Output(polled_run_results)\n\n\ndef unwrap_result(poll_rpc_generator) -> DbtRpcOutput:\n \"\"\"A helper function that extracts the `DbtRpcOutput` value from a generator.\n\n The parameter `poll_rpc_generator` is expected to be an invocation of `_poll_rpc`.\n \"\"\"\n output = None\n for x in poll_rpc_generator:\n output = x\n\n if output is None:\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=\"poll_rpc yielded None as its last value. Expected value of type Output containing DbtRpcOutput.\",\n )\n\n if not isinstance(output, Output):\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=f\"poll_rpc yielded value of type {type(output)} as its last value. Expected value of type Output containing DbtRpcOutput.\",\n )\n\n if not isinstance(output.value, DbtRpcOutput):\n raise DagsterDbtRpcUnexpectedPollOutputError(\n description=f\"poll_rpc yielded Output containing {type(output.value)}. Expected DbtRpcOutput.\",\n )\n\n return output.value\n\n\n@solid(\n description=\"A solid to invoke dbt run over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt run.\",\n )\n ],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to run.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt run`` command to a dbt RPC server and returns the request token.\n\n This dbt RPC solid is asynchronous. 
The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.run(\n models=context.solid_config[\"models\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt run over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to run.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"full_refresh\": Field(\n config=Bool,\n description=\"Whether or not to perform a --full-refresh.\",\n is_required=False,\n default_value=False,\n ),\n \"fail_fast\": Field(\n config=Bool,\n description=\"Whether or not to --fail-fast.\",\n is_required=False,\n default_value=False,\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n \"task_tags\": Permissive(),\n \"max_retries\": Field(config=Int, is_required=False, default_value=5),\n \"retry_interval\": Field(config=Int, is_required=False, default_value=120),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt run`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n if context.solid_config[\"task_tags\"]:\n results = context.resources.dbt_rpc.ps().json()\n for task in results[\"result\"][\"rows\"]:\n if task[\"tags\"] == context.solid_config[\"task_tags\"]:\n context.log.warning(\n f\"RPC task with tags {json.dumps(task['tags'])} currently running.\"\n )\n raise RetryRequested(\n max_retries=context.solid_config[\"max_retries\"],\n seconds_to_wait=context.solid_config[\"retry_interval\"],\n )\n\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" run\"\n\n if context.solid_config[\"models\"]:\n models = \" \".join(set(context.solid_config[\"models\"]))\n command += f\" --models {models}\"\n\n if context.solid_config[\"exclude\"]:\n exclude = \" \".join(set(context.solid_config[\"exclude\"]))\n command += f\" --exclude {exclude}\"\n\n if context.solid_config[\"full_refresh\"]:\n command += \" --full-refresh\"\n\n if context.solid_config[\"fail_fast\"]:\n command += \" --fail-fast\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command, **context.solid_config[\"task_tags\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = 
resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke dbt test over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt test.\",\n )\n ],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to test.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"data\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run custom data tests.\",\n ),\n \"schema\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run schema tests.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_test(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt test`` command to a dbt RPC server and returns the request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.test(\n models=context.solid_config[\"models\"],\n exclude=context.solid_config[\"exclude\"],\n data=context.solid_config[\"data\"],\n schema=context.solid_config[\"schema\"],\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt test over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"models\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to test.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt models to exclude.\",\n ),\n \"data\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run custom data tests.\",\n ),\n \"schema\": Field(\n config=Bool,\n default_value=True,\n is_required=False,\n description=\"Whether or not to run schema tests.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_test_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt test`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n resp = context.resources.dbt_rpc.test(\n models=context.solid_config[\"models\"],\n exclude=context.solid_config[\"exclude\"],\n data=context.solid_config[\"data\"],\n schema=context.solid_config[\"schema\"],\n )\n 
context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke a dbt run operation over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt run operation.\",\n )\n ],\n config_schema={\n \"macro\": Field(\n config=String,\n is_required=True,\n description=\"The dbt macro to invoke as a run operation\",\n ),\n \"args\": Field(\n config=Noneable(Permissive()),\n is_required=False,\n default_value=None,\n description=\"Arguments to supply to the invoked macro.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_operation(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt run-operation`` command to a dbt RPC server and returns the\n request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.run_operation(\n macro=context.solid_config[\"macro\"], args=context.solid_config[\"args\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke a dbt run operation over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"macro\": Field(\n config=String,\n is_required=True,\n description=\"The dbt macro to invoke as a run operation\",\n ),\n \"args\": Field(\n config=Noneable(Permissive()),\n is_required=False,\n default_value=None,\n description=\"Arguments to supply to the invoked macro.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_run_operation_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt run-operation`` command to a dbt RPC server and returns the result of the\n executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n resp = context.resources.dbt_rpc.run_operation(\n macro=context.solid_config[\"macro\"], args=context.solid_config[\"args\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke a dbt snapshot over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt snapshot.\",\n )\n ],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n 
description=\"The dbt snapshot files to snapshot.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to exclude from the snapshot.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt snapshot`` command to a dbt RPC server and returns the\n request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.snapshot(\n select=context.solid_config[\"select\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke a dbt snapshot over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to snapshot.\",\n ),\n \"exclude\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt snapshot files to exclude from the snapshot.\",\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n \"task_tags\": Permissive(),\n \"max_retries\": Field(config=Int, is_required=False, default_value=5),\n \"retry_interval\": Field(config=Int, is_required=False, default_value=120),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt snapshot`` command to a dbt RPC server and returns the result of\n the executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n if context.solid_config[\"task_tags\"]:\n results = context.resources.dbt_rpc.ps().json()\n for task in results[\"result\"][\"rows\"]:\n if task[\"tags\"] == context.solid_config[\"task_tags\"]:\n context.log.warning(\n f\"RPC task with tags {json.dumps(task['tags'])} currently running.\"\n )\n raise RetryRequested(\n max_retries=context.solid_config[\"max_retries\"],\n seconds_to_wait=context.solid_config[\"retry_interval\"],\n )\n\n resp = context.resources.dbt_rpc.snapshot(\n select=context.solid_config[\"select\"], exclude=context.solid_config[\"exclude\"]\n )\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to invoke dbt source snapshot-freshness over RPC.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[\n OutputDefinition(\n name=\"request_token\",\n dagster_type=String,\n description=\"The request token of the invoked dbt snapshot.\",\n )\n ],\n config_schema={\n 
\"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt sources to snapshot-freshness for.\",\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_freshness(context: SolidExecutionContext) -> String:\n \"\"\"This solid sends the ``dbt source snapshot-freshness`` command to a dbt RPC server and\n returns the request token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" source snapshot-freshness\"\n\n if context.solid_config[\"select\"]:\n select = \" \".join(set(context.solid_config[\"select\"]))\n command += f\" --select {select}\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command)\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n return resp.json().get(\"result\").get(\"request_token\")\n\n\n@solid(\n description=\"A solid to invoke dbt source snapshot-freshness over RPC and poll the resulting RPC process until it's complete.\",\n input_defs=[InputDefinition(name=\"start_after\", dagster_type=Nothing)],\n output_defs=[OutputDefinition(name=\"result\", dagster_type=DbtRpcOutput)],\n config_schema={\n \"select\": Field(\n config=Noneable(Array(String)),\n default_value=None,\n is_required=False,\n description=\"The dbt sources to snapshot-freshness for.\",\n ),\n \"warn_error\": Field(\n config=Bool,\n description=\"Whether or not to --warn-error.\",\n is_required=False,\n default_value=False,\n ),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_snapshot_freshness_and_wait(context: SolidExecutionContext) -> DbtRpcOutput:\n \"\"\"This solid sends the ``dbt source snapshot`` command to a dbt RPC server and returns the\n result of the executed dbt process.\n\n This dbt RPC solid is synchronous, and will periodically poll the dbt RPC server until the dbt\n process is completed.\n \"\"\"\n command = \"\"\n\n if context.solid_config[\"warn_error\"]:\n command += \" --warn-error\"\n\n command += \" source snapshot-freshness\"\n\n if context.solid_config[\"select\"]:\n select = \" \".join(set(context.solid_config[\"select\"]))\n command += f\" --select {select}\"\n\n context.log.debug(f\"Running dbt command: dbt {command}\")\n resp = context.resources.dbt_rpc.cli(cli=command)\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n return _poll_rpc(context, request_token)\n\n\n@solid(\n description=\"A solid to compile a SQL query in context of a dbt project over RPC.\",\n input_defs=[\n InputDefinition(name=\"start_after\", dagster_type=Nothing),\n InputDefinition(\n name=\"sql\", description=\"The SQL query to be compiled.\", dagster_type=String\n ),\n ],\n output_defs=[\n OutputDefinition(name=\"sql\", 
description=\"The compiled SQL query.\", dagster_type=String)\n ],\n config_schema={\n \"name\": Field(config=String),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n)\ndef dbt_rpc_compile_sql(context: SolidExecutionContext, sql: String) -> String:\n \"\"\"This solid sends the ``dbt compile`` command to a dbt RPC server and returns the request\n token.\n\n This dbt RPC solid is asynchronous. The request token can be used in subsequent RPC requests to\n poll the progress of the running dbt process.\n \"\"\"\n resp = context.resources.dbt_rpc.compile_sql(sql=sql, name=context.solid_config[\"name\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n result = unwrap_result(_poll_rpc(context, request_token))\n return result.results[0].node[\"compiled_sql\"]\n\n\ndef create_dbt_rpc_run_sql_solid(\n name: str, output_def: Optional[OutputDefinition] = None, **kwargs\n) -> Callable:\n \"\"\"This function is a factory which constructs a solid that will copy the results of a SQL query\n run within the context of a dbt project to a pandas ``DataFrame``.\n\n Any kwargs passed to this function will be passed along to the underlying :func:`@solid\n <dagster.solid>` decorator. However, note that overriding ``config_schema``, ``input_defs``, and\n ``required_resource_keys`` is not allowed and will throw a :class:`DagsterInvalidDefinitionError\n <dagster.DagsterInvalidDefinitionError>`.\n\n If you would like to configure this solid with different config fields, you could consider using\n :func:`@composite_solid <dagster.composite_solid>` to wrap this solid.\n\n Args:\n name (str): The name of this solid.\n output_def (OutputDefinition, optional): The :class:`OutputDefinition\n <dagster.OutputDefinition>` for the solid. This value should always be a representation\n of a pandas ``DataFrame``. 
If not specified, the solid will default to an\n :class:`OutputDefinition <dagster.OutputDefinition>` named \"df\" with a ``DataFrame``\n dagster type.\n\n Returns:\n SolidDefinition: Returns the constructed solid definition.\n \"\"\"\n check.str_param(obj=name, param_name=\"name\")\n check.opt_inst_param(obj=output_def, param_name=\"output_def\", ttype=OutputDefinition)\n\n if \"config_schema\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding config_schema is not supported.\")\n\n if \"input_defs\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding input_defs is not supported.\")\n\n if \"required_resource_keys\" in kwargs:\n raise DagsterInvalidDefinitionError(\"Overriding required_resource_keys is not supported.\")\n\n @solid(\n name=name,\n description=kwargs.pop(\n \"description\",\n \"A solid to run a SQL query in context of a dbt project over RPC and return the results in a pandas DataFrame.\",\n ),\n input_defs=[\n InputDefinition(name=\"start_after\", dagster_type=Nothing),\n InputDefinition(\n name=\"sql\", description=\"The SQL query to be run.\", dagster_type=String\n ),\n ],\n output_defs=[\n output_def\n or OutputDefinition(\n name=\"df\", description=\"The results of the SQL query.\", dagster_type=DataFrame\n )\n ],\n config_schema={\n \"name\": Field(config=String),\n \"interval\": Field(\n config=Int,\n is_required=False,\n default_value=10,\n description=\"The interval (in seconds) at which to poll the dbt rpc process.\",\n ),\n \"logs\": Field(\n config=Bool,\n is_required=False,\n default_value=True,\n description=\"Whether or not to return logs from the process.\",\n ),\n },\n required_resource_keys={\"dbt_rpc\"},\n tags={\"kind\": \"dbt\"},\n **kwargs,\n )\n def _dbt_rpc_run_sql(context: SolidExecutionContext, sql: String) -> DataFrame:\n resp = context.resources.dbt_rpc.run_sql(sql=sql, name=context.solid_config[\"name\"])\n context.log.debug(resp.text)\n raise_for_rpc_error(context, resp)\n request_token = resp.json().get(\"result\").get(\"request_token\")\n result = unwrap_result(_poll_rpc(context, request_token))\n table = result.results[0].table\n return pd.DataFrame.from_records(data=table[\"rows\"], columns=table[\"column_names\"])\n\n return _dbt_rpc_run_sql\n"
] |
[
[
"pandas.DataFrame.from_records"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
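The single pandas call listed for this row is the final conversion inside `_dbt_rpc_run_sql` above, which turns the dbt RPC table payload into a DataFrame. A hedged sketch with made-up sample rows:

```python
import pandas as pd

# shape of the "table" dict returned by the dbt RPC run_sql result (values here are placeholders)
table = {
    "column_names": ["id", "status"],
    "rows": [(1, "success"), (2, "error")],
}
df = pd.DataFrame.from_records(data=table["rows"], columns=table["column_names"])
print(df)
```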
wangrui1996/simple_pose_tensorflow
|
[
"6b97bf1cff7836eec638fe54e86e1ec203c0b79f"
] |
[
"utils/create_cpm_id_fulljoints.py"
] |
[
"import cv2\nimport cpm_utils\nimport numpy as np\nimport math\nimport tensorflow as tf\nimport time\nimport json\nimport random\nimport os\n\n\ntfr_file = 'cpm_sample_dataset.tfrecords'\ndataset_dir = '/Users/wangrui/Downloads/id_dataset/data'\n\nSHOW_INFO = False\nbox_size = 32\ninput_size = 256\nnum_of_joints = 6\ngaussian_radius = 2\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _float64_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\n# Create writer\ntfr_writer = tf.python_io.TFRecordWriter(tfr_file)\n\nimg_count = 0\nt1 = time.time()\nimages_dir = os.path.join(dataset_dir, \"images\")\nannotations_dir = os.path.join(dataset_dir, \"annotations\")\n# Loop each dir\nfor file_name in os.listdir(images_dir):\n\n image_path = os.path.join(images_dir, file_name)\n annotation_path = os.path.join(annotations_dir, \"{}.json\".format(file_name.split(\".\")[0]))\n\n #cur_img_path = dataset_dir + person_dir + '/imgs/' + line[0]\n cur_img = cv2.imread(image_path)\n print(image_path)\n inp_f = open(annotation_path, 'r')\n json_data = json.load(inp_f)\n #json_data[\"shapes\"] = \"\"\n\n def get_bbox_and_joints_from_json(shapes):\n assert len(shapes) == 2 # must be len is 2, one is bbox and annother is text\n assert shapes[0][\"label\"] in [\"zhen\",\"fan\",\"zheng\",\"text\"]\n assert shapes[1][\"label\"] in [\"zhen\",\"fan\",\"zheng\",\"text\"]\n bbox_idx = 0\n if shapes[bbox_idx][\"label\"]==\"text\":\n bbox_idx = 1\n\n bbox_point = shapes[bbox_idx][\"points\"]\n bx_x1, bx_y1 = bbox_point[0]\n bx_x2, bx_y2 = bbox_point[2]\n cur_id_bbox = [min([bx_x1, bx_x2]),\n min([bx_y1, bx_y2]),\n max([bx_x1, bx_x2]),\n max([bx_y1, bx_y2])]\n #if cur_hand_bbox[0] < 0: cur_hand_bbox[0] = 0\n #if cur_hand_bbox[1] < 0: cur_hand_bbox[1] = 0\n #if cur_hand_bbox[2] > cur_img.shape[1]: cur_hand_bbox[2] = cur_img.shape[1]\n #if cur_hand_bbox[3] > cur_img.shape[0]: cur_hand_bbox[3] = cur_img.shape[0]\n text_bx = shapes[1-bbox_idx][\"points\"]\n\n tmpx1,tmpy1 = text_bx[0]\n tmpx2,tmpy2 = text_bx[1]\n tmpx3,tmpy3 = text_bx[2]\n text_arr = np.array(text_bx).transpose()\n x_list = text_arr[0]\n y_list = text_arr[1]\n axis_1 = np.where(y_list==y_list.min())[0]\n axis_3 = np.where(x_list==x_list.max())[0]\n axis_2 = 3 - axis_1 - axis_3\n cur_id_joints_x = [-1 for _ in range(6)]\n cur_id_joints_y = [-1 for _ in range(6)]\n sub_add = 0\n is_zhen = True\n if shapes[bbox_idx][\"label\"] == \"fan\":\n is_zhen = False\n sub_add = 3\n\n cur_id_joints_x[sub_add] = x_list[axis_1][0]\n cur_id_joints_y[sub_add] = y_list[axis_1][0]\n cur_id_joints_x[sub_add+1] = x_list[axis_2][0]\n cur_id_joints_y[sub_add+1] = y_list[axis_2][0]\n cur_id_joints_x[sub_add+2] = x_list[axis_3][0]\n cur_id_joints_y[sub_add+2] = y_list[axis_3][0]\n return is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y\n # Read in bbox and joints coords\n is_zhen, cur_id_bbox, cur_id_joints_x, cur_id_joints_y = get_bbox_and_joints_from_json(json_data[\"shapes\"])\n print(cur_id_bbox)\n if is_zhen:\n gauss_range_list = [0, 1, 2]\n else:\n gauss_range_list = [3, 4, 5]\n #exit(0)\n\n #cur_hand_joints_x = [float(i) for i in line[9:49:2]]\n #cur_hand_joints_x.append(float(line[7]))\n #cur_hand_joints_y = [float(i) for i in line[10:49:2]]\n #cur_hand_joints_y.append(float(line[8]))\n\n # Crop image and adjust joint coords\n cur_img = 
cur_img[int(float(cur_id_bbox[1])):int(float(cur_id_bbox[3])),\n int(float(cur_id_bbox[0])):int(float(cur_id_bbox[2])),\n :]\n\n #cv2.imshow(\"demo\", cur_img)\n cv2.imwrite(\"demo.jpg\", cur_img)\n #cv2.waitKey(0)\n #exit(0)\n cur_id_joints_x = [x - cur_id_bbox[0] for x in cur_id_joints_x]\n cur_id_joints_y = [x - cur_id_bbox[1] for x in cur_id_joints_y]\n\n # # Display joints\n # for i in range(len(cur_hand_joints_x)):\n # cv2.circle(cur_img, center=(int(cur_hand_joints_x[i]), int(cur_hand_joints_y[i])),radius=3, color=(255,0,0), thickness=-1)\n # cv2.imshow('', cur_img)\n # cv2.waitKey(500)\n # cv2.imshow('', cur_img)\n # cv2.waitKey(1)\n\n output_image = np.ones(shape=(input_size, input_size, 3)) * 128\n output_heatmaps = np.zeros((box_size, box_size, num_of_joints))\n\n # Resize and pad image to fit output image size\n if cur_img.shape[0] > cur_img.shape[1]:\n scale = input_size / (cur_img.shape[0] * 1.0)\n\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n\n # Resize image\n image = cv2.resize(cur_img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LANCZOS4)\n offset = image.shape[1] % 2\n\n output_image[:, int(input_size / 2 - math.floor(image.shape[1] / 2)): int(\n input_size / 2 + math.floor(image.shape[1] / 2) + offset), :] = image\n cur_id_joints_x = map(lambda x: x + (input_size / 2 - math.floor(image.shape[1] / 2)),\n cur_id_joints_x)\n scale = box_size / (cur_img.shape[0] * 1.0)\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n cur_id_joints_x = np.asarray(list(cur_id_joints_x))\n cur_id_joints_y = np.asarray(list(cur_id_joints_y))\n\n if SHOW_INFO:\n hmap = np.zeros((box_size, box_size))\n # Plot joints\n for i in range(num_of_joints):\n cv2.circle(output_image, (int(cur_id_joints_x[i]), int(cur_id_joints_y[i])), 3, (0, 255, 0), 2)\n\n # Generate joint gaussian map\n\n part_heatmap= cpm_utils.gaussian_img(box_size,box_size,cur_id_joints_x[i],cur_id_joints_y[i],1)\n #part_heatmap = utils.make_gaussian(output_image.shape[0], gaussian_radius,\n # [cur_hand_joints_x[i], cur_hand_joints_y[i]])\n hmap += part_heatmap * 50\n else:\n for i in range(num_of_joints):\n #output_heatmaps[:, :, i] = utils.make_gaussian(box_size, gaussian_radius,\n # [cur_hand_joints_x[i], cur_hand_joints_y[i]])\n if i in gauss_range_list:\n output_heatmaps[:, :, i]= cpm_utils.gaussian_img(box_size,box_size,cur_id_joints_x[i],cur_id_joints_y[i],1)\n\n else:\n scale = input_size / (cur_img.shape[1] * 1.0)\n\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n\n # Resize image\n image = cv2.resize(cur_img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LANCZOS4)\n offset = image.shape[0] % 2\n output_image[int(input_size / 2 - math.floor(image.shape[0] / 2)): int(\n input_size / 2 + math.floor(image.shape[0] / 2) + offset), :, :] = image\n cur_id_joints_y = map(lambda x: x + (input_size / 2 - math.floor(image.shape[0] / 2)),\n cur_id_joints_y)\n scale = box_size / (input_size * 1.0)\n # Relocalize points\n cur_id_joints_x = map(lambda x: x * scale, cur_id_joints_x)\n cur_id_joints_y = map(lambda x: x * scale, cur_id_joints_y)\n cur_id_joints_x = np.asarray(list(cur_id_joints_x))\n cur_id_joints_y = np.asarray(list(cur_id_joints_y))\n\n if SHOW_INFO:\n hmap = np.zeros((box_size, box_size))\n # Plot joints\n for i 
in range(num_of_joints):\n cv2.circle(output_image, (int(cur_id_joints_x[i]), int(cur_id_joints_y[i])), 3, (0, 255, 0), 2)\n\n # Generate joint gaussian map\n #part_heatmap = cpm_utils.make_gaussian(output_image.shape[0], gaussian_radius,\n # [cur_id_joints_x[i], cur_id_joints_y[i]])\n #hmap += part_heatmap * 50\n cv2.imshow(\"demo\", output_image)\n cv2.waitKey(0)\n else:\n for i in range(num_of_joints):\n if i in gauss_range_list:\n output_heatmaps[:, :, i] = cpm_utils.make_gaussian(box_size, gaussian_radius,\n [cur_id_joints_x[i], cur_id_joints_y[i]])\n if SHOW_INFO:\n cv2.imshow('', hmap.astype(np.uint8))\n cv2.imshow('i', output_image.astype(np.uint8))\n cv2.waitKey(0)\n\n # Create background map\n output_background_map = np.ones((box_size, box_size)) - np.amax(output_heatmaps, axis=2)\n output_heatmaps = np.concatenate((output_heatmaps, output_background_map.reshape((box_size, box_size, 1))),\n axis=2)\n # cv2.imshow('', (output_background_map*255).astype(np.uint8))\n # cv2.imshow('h', (np.amax(output_heatmaps[:, :, 0:21], axis=2)*255).astype(np.uint8))\n # cv2.waitKey(1000)\n\n\n coords_set = np.concatenate((np.reshape(cur_id_joints_x, (num_of_joints, 1)),\n np.reshape(cur_id_joints_y, (num_of_joints, 1))),\n axis=1)\n output_image_raw = output_image.astype(np.uint8).tostring()\n output_heatmaps_raw = output_heatmaps.flatten().tolist()\n output_coords_raw = coords_set.flatten().tolist()\n\n raw_sample = tf.train.Example(features=tf.train.Features(feature={\n 'image': _bytes_feature(output_image_raw),\n 'heatmaps': _float64_feature(output_heatmaps_raw)\n }))\n\n tfr_writer.write(raw_sample.SerializeToString())\n\n img_count += 1\n if img_count % 50 == 0:\n print('Processed %d images, took %f seconds' % (img_count, time.time() - t1))\n t1 = time.time()\n\ntfr_writer.close()\n"
] |
[
[
"numpy.amax",
"numpy.reshape",
"numpy.ones",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"numpy.array",
"numpy.zeros",
"tensorflow.train.Int64List"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
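The tensorflow calls listed for this row are the `tf.train.Example` serialization helpers used when writing the TFRecord above. A minimal sketch of that pattern; the image bytes and heatmap values below are dummy data:

```python
import tensorflow as tf

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _float64_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))

example = tf.train.Example(features=tf.train.Features(feature={
    "image": _bytes_feature(b"\x00\x01"),          # raw image bytes
    "heatmaps": _float64_feature([0.0, 0.5, 1.0]), # flattened heatmap values
}))
serialized = example.SerializeToString()           # bytes ready for a TFRecord writer
```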
lexical-kenobi/Face-Vision-3D_Pose
|
[
"07eee33d09018c99251051a983d3842212177e5a",
"07eee33d09018c99251051a983d3842212177e5a"
] |
[
"utils/paf.py",
"utils/inference.py"
] |
[
"#!/usr/bin/env python3\n# coding: utf-8\n\nimport numpy as np\nfrom .ddfa import _parse_param\nfrom .params import u_filter, w_filter, w_exp_filter, std_size, param_mean, param_std\n\n\ndef reconstruct_paf_anchor(param, whitening=True):\n if whitening:\n param = param * param_std + param_mean\n p, offset, alpha_shp, alpha_exp = _parse_param(param)\n anchor = p @ (u_filter + w_filter @ alpha_shp + w_exp_filter @ alpha_exp).reshape(3, -1, order='F') + offset\n anchor[1, :] = std_size + 1 - anchor[1, :]\n return anchor[:2, :]\n\n\ndef gen_offsets(kernel_size):\n offsets = np.zeros((2, kernel_size * kernel_size), dtype=np.int)\n ind = 0\n delta = (kernel_size - 1) // 2\n for i in range(kernel_size):\n y = i - delta\n for j in range(kernel_size):\n x = j - delta\n offsets[0, ind] = x\n offsets[1, ind] = y\n ind += 1\n return offsets\n\n\ndef gen_img_paf(img_crop, param, kernel_size=3):\n \"\"\"Generate PAF image\n img_crop: 120x120\n kernel_size: kernel_size for convolution, should be even number like 3 or 5 or ...\n \"\"\"\n anchor = reconstruct_paf_anchor(param)\n anchor = np.round(anchor).astype(np.int)\n delta = (kernel_size - 1) // 2\n anchor[anchor < delta] = delta\n anchor[anchor >= std_size - delta - 1] = std_size - delta - 1\n\n img_paf = np.zeros((64 * kernel_size, 64 * kernel_size, 3), dtype=np.uint8)\n offsets = gen_offsets(kernel_size)\n for i in range(kernel_size * kernel_size):\n ox, oy = offsets[:, i]\n index0 = anchor[0] + ox\n index1 = anchor[1] + oy\n p = img_crop[index1, index0].reshape(64, 64, 3).transpose(1, 0, 2)\n\n img_paf[oy + delta::kernel_size, ox + delta::kernel_size] = p\n\n return img_paf\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python3\n# coding: utf-8\n__author__ = 'cleardusk'\n\nimport numpy as np\nfrom math import sqrt\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom .ddfa import reconstruct_vertex\n\n\ndef get_suffix(filename):\n \"\"\"a.jpg -> jpg\"\"\"\n pos = filename.rfind('.')\n if pos == -1:\n return ''\n return filename[pos:]\n\n\ndef crop_img(img, roi_box):\n h, w = img.shape[:2]\n\n sx, sy, ex, ey = [int(round(_)) for _ in roi_box]\n dh, dw = ey - sy, ex - sx\n if len(img.shape) == 3:\n res = np.zeros((dh, dw, 3), dtype=np.uint8)\n else:\n res = np.zeros((dh, dw), dtype=np.uint8)\n if sx < 0:\n sx, dsx = 0, -sx\n else:\n dsx = 0\n\n if ex > w:\n ex, dex = w, dw - (ex - w)\n else:\n dex = dw\n\n if sy < 0:\n sy, dsy = 0, -sy\n else:\n dsy = 0\n\n if ey > h:\n ey, dey = h, dh - (ey - h)\n else:\n dey = dh\n\n res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]\n return res\n\n\ndef calc_hypotenuse(pts):\n bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]\n center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2\n bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]\n llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)\n return llength / 3\n\n\ndef parse_roi_box_from_landmark(pts):\n \"\"\"calc roi box from landmark\"\"\"\n bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]\n center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2\n bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]\n\n llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)\n center_x = (bbox[2] + bbox[0]) / 2\n center_y = (bbox[3] + bbox[1]) / 2\n\n roi_box = [0] * 4\n roi_box[0] = center_x - llength / 2\n roi_box[1] = center_y - llength / 2\n roi_box[2] = roi_box[0] + llength\n roi_box[3] = roi_box[1] + llength\n\n return roi_box\n\n\ndef parse_roi_box_from_bbox(bbox):\n left, top, right, bottom = bbox\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.14\n size = int(old_size * 1.58)\n roi_box = [0] * 4\n roi_box[0] = center_x - size / 2\n roi_box[1] = center_y - size / 2\n roi_box[2] = roi_box[0] + size\n roi_box[3] = roi_box[1] + size\n return roi_box\n\n\ndef dump_to_ply(vertex, tri, wfp):\n header = \"\"\"ply\n format ascii 1.0\n element vertex {}\n property float x\n property float y\n property float z\n element face {}\n property list uchar int vertex_indices\n end_header\"\"\"\n\n n_vertex = vertex.shape[1]\n n_face = tri.shape[1]\n header = header.format(n_vertex, n_face)\n\n with open(wfp, 'w') as f:\n f.write(header + '\\n')\n for i in range(n_vertex):\n x, y, z = vertex[:, i]\n f.write('{:.4f} {:.4f} {:.4f}\\n'.format(x, y, z))\n for i in range(n_face):\n idx1, idx2, idx3 = tri[:, i]\n f.write('3 {} {} {}\\n'.format(idx1 - 1, idx2 - 1, idx3 - 1))\n print('Dump tp {}'.format(wfp))\n\n\ndef dump_vertex(vertex, wfp):\n sio.savemat(wfp, {'vertex': vertex})\n print('Dump to {}'.format(wfp))\n\n\ndef _predict_vertices(param, roi_bbox, dense, transform=True):\n vertex = reconstruct_vertex(param, dense=dense)\n sx, sy, ex, ey = roi_bbox\n scale_x = (ex - sx) / 120\n scale_y = (ey - sy) / 120\n vertex[0, :] = vertex[0, :] * scale_x + sx\n vertex[1, :] = vertex[1, :] * scale_y + sy\n\n s = (scale_x + scale_y) / 2\n vertex[2, :] *= s\n\n return 
vertex\n\n\ndef predict_68pts(param, roi_box):\n return _predict_vertices(param, roi_box, dense=False)\n\n\ndef predict_dense(param, roi_box):\n return _predict_vertices(param, roi_box, dense=True)\n\n\ndef draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):\n \"\"\"Draw landmarks using matplotlib\"\"\"\n height, width = img.shape[:2]\n plt.figure(figsize=(12, height / width * 12))\n plt.imshow(img[:, :, ::-1])\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0)\n plt.axis('off')\n\n if not type(pts) in [tuple, list]:\n pts = [pts]\n for i in range(len(pts)):\n if style == 'simple':\n plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')\n\n elif style == 'fancy':\n alpha = 0.8\n markersize = 4\n lw = 1.5\n color = kwargs.get('color', 'w')\n markeredgecolor = kwargs.get('markeredgecolor', 'black')\n\n nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]\n\n # close eyes and mouths\n plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],\n color=color, lw=lw, alpha=alpha - 0.1)\n plot_close(41, 36)\n plot_close(47, 42)\n plot_close(59, 48)\n plot_close(67, 60)\n\n for ind in range(len(nums) - 1):\n l, r = nums[ind], nums[ind + 1]\n plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)\n\n plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,\n color=color,\n markeredgecolor=markeredgecolor, alpha=alpha)\n\n if wfp is not None:\n plt.savefig(wfp, dpi=200)\n print('Save visualization result to {}'.format(wfp))\n if show_flg:\n plt.show()\n\n\ndef get_colors(image, vertices):\n [h, w, _] = image.shape\n vertices[0, :] = np.minimum(np.maximum(vertices[0, :], 0), w - 1) # x\n vertices[1, :] = np.minimum(np.maximum(vertices[1, :], 0), h - 1) # y\n ind = np.round(vertices).astype(np.int32)\n colors = image[ind[1, :], ind[0, :], :] # n x 3\n\n return colors\n\n\ndef write_obj_with_colors(obj_name, vertices, triangles, colors):\n triangles = triangles.copy() # meshlab start with 1\n\n if obj_name.split('.')[-1] != 'obj':\n obj_name = obj_name + '.obj'\n\n # write obj\n with open(obj_name, 'w') as f:\n # write vertices & colors\n for i in range(vertices.shape[1]):\n s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\\n'.format(vertices[1, i], vertices[0, i], vertices[2, i], colors[i, 2],\n colors[i, 1], colors[i, 0])\n f.write(s)\n\n # write f: ver ind/ uv ind\n for i in range(triangles.shape[1]):\n s = 'f {} {} {}\\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])\n f.write(s)\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.round",
"numpy.zeros"
],
[
"matplotlib.pyplot.imshow",
"numpy.maximum",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.round",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"scipy.io.savemat",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
binary-husky/hmp2g
|
[
"1a4f4093cd296f07348f4db4c7503aca6e1fb05c",
"1a4f4093cd296f07348f4db4c7503aca6e1fb05c"
] |
[
"ALGORITHM/conc_4hist_mathdb/net.py",
"ALGORITHM/commom/dl_pool.py"
] |
[
"import math\nimport torch,time,random\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom torch.nn.modules.linear import Linear\nfrom ..commom.attention import MultiHeadAttention\nfrom ..commom.norm import DynamicNorm\nfrom ..commom.mlp import LinearFinal, SimpleMLP, ResLinear\nfrom UTILS.colorful import print亮紫\nfrom UTILS.tensor_ops import my_view, Args2tensor_Return2numpy, Args2tensor, __hash__, __hashn__, pad_at_dim\nfrom UTILS.tensor_ops import _2cpu2numpy, one_hot_with_nan, gather_righthand, pt_inf\n\n\ndef weights_init(m):\n def init_Linear(m, final_layer=False):\n nn.init.orthogonal_(m.weight.data)\n if final_layer:nn.init.orthogonal_(m.weight.data, gain=0.01)\n if m.bias is not None: nn.init.uniform_(m.bias.data, a=-0.02, b=0.02)\n\n initial_fn_dict = {\n 'Net': None, 'DataParallel':None, 'BatchNorm1d':None, 'Concentration':None,\n 'Pnet':None,'Sequential':None,'DataParallel':None,'Tanh':None,\n 'ModuleList':None,'ModuleDict':None,'MultiHeadAttention':None,\n 'SimpleMLP':None,'Extraction_Module':None,'SelfAttention_Module':None,\n 'ReLU':None,'Softmax':None,'DynamicNorm':None,'EXTRACT':None,\n 'LinearFinal':lambda m:init_Linear(m, final_layer=True),\n 'Linear':init_Linear, 'ResLinear':None, 'LeakyReLU':None,'SimpleAttention':None\n }\n\n classname = m.__class__.__name__\n assert classname in initial_fn_dict.keys(), ('how to handle the initialization of this class? ', classname)\n init_fn = initial_fn_dict[classname]\n if init_fn is None: return\n init_fn(m)\n\nclass Concentration(nn.Module):\n def __init__(self, n_focus_on, h_dim, skip_connect=False, skip_connect_dim=0, adopt_selfattn=False):\n super().__init__()\n self.n_focus_on = n_focus_on\n self.skip_connect = skip_connect\n self.skip_dim = h_dim+skip_connect_dim\n self.CT_W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.CT_motivate_mlp = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))\n self.AT_forward_mlp = nn.Sequential(nn.Linear((n_focus_on+1)*self.skip_dim, h_dim), nn.ReLU(inplace=True))\n self.adopt_selfattn = adopt_selfattn\n if self.adopt_selfattn:\n self.AT_Attention = Extraction_Module(hidden_dim=self.skip_dim, activate_output=True)\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. 
/ math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, vs, ve, ve_dead, skip_connect_ze=None, skip_connect_zs=None):\n mask = ve_dead\n Q = torch.matmul(vs, self.CT_W_query) \n K = torch.matmul(ve, self.CT_W_key) \n\n norm_factor = 1 / math.sqrt(Q.shape[-1])\n compat = norm_factor * torch.matmul(Q, K.transpose(2, 3)) \n assert compat.shape[-2] == 1\n compat = compat.squeeze(-2)\n compat[mask.bool()] = -math.inf\n score = F.softmax(compat, dim=-1)\n # nodes with no neighbours were softmax into nan, fix them to 0\n score = torch.nan_to_num(score, 0)\n # ----------- motivational brach -------------\n Va = torch.matmul(score.unsqueeze(-2), torch.matmul(ve, self.CT_W_val)) \n v_M = torch.cat((vs, Va), -1).squeeze(-2) \n v_M_final = self.CT_motivate_mlp(v_M)\n # ----------- forward branch -------------\n score_sort_index = torch.argsort(score, dim=-1, descending=True)\n score_sort_drop_index = score_sort_index[..., :self.n_focus_on]\n if self.skip_connect:\n ve = torch.cat((ve, skip_connect_ze), -1)\n vs = torch.cat((vs, skip_connect_zs), -1)\n ve_C = gather_righthand(src=ve, index=score_sort_drop_index, check=False)\n need_padding = (score_sort_drop_index.shape[-1] != self.n_focus_on)\n if need_padding:\n print('the n_focus param is large than input, advise: pad observation instead of pad here')\n ve_C = pad_at_dim(ve_C, dim=-2, n=self.n_focus_on)\n v_C_stack = torch.cat((vs, ve_C), dim=-2)\n if self.adopt_selfattn:\n v_C_stack = self.AT_Attention(v_C_stack, mask=None)\n\n v_C_flat = my_view(v_C_stack, [0, 0, -1]); assert v_C_stack.dim()==4\n v_C_final = self.AT_forward_mlp(v_C_flat)\n return v_C_final, v_M_final\n\n\nclass SimpleAttention(nn.Module):\n def __init__(self, h_dim):\n super().__init__()\n self.W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))\n self.init_parameters()\n\n def init_parameters(self):\n for param in self.parameters():\n stdv = 1. 
/ math.sqrt(param.size(-1))\n param.data.uniform_(-stdv, stdv)\n\n def forward(self, k, q, v, mask=None):\n Q = torch.matmul(q, self.W_query) \n K = torch.matmul(k, self.W_key) \n V = torch.matmul(v, self.W_val)\n\n norm_factor = 1 / math.sqrt(Q.shape[-1])\n compat = norm_factor * torch.matmul(Q, K.transpose(2, 3)) \n if mask is not None: compat[mask.bool()] = -math.inf\n score = torch.nan_to_num(F.softmax(compat, dim=-1), 0)\n # ----------- motivational brach -------------\n return torch.matmul(score, V) \n\n\nclass Extraction_Module(nn.Module): # merge by MLP version\n def __init__(self, hidden_dim=128, activate_output=False):\n super().__init__()\n h_dim = hidden_dim\n from .foundation import AlgorithmConfig\n if AlgorithmConfig.use_my_attn:\n self.attn = SimpleAttention(h_dim=h_dim)\n print('use my attn')\n\n if activate_output:\n self.MLP = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))\n print(\"activate_output\")\n else:\n self.MLP = nn.Sequential(nn.Linear(h_dim * 2, h_dim))\n print(\"no activate_output\")\n\n def forward(self, agent_enc, mask=None):\n attn_out = self.attn(q=agent_enc, k=agent_enc, v=agent_enc, mask=mask)\n concated_attn_result = torch.cat(tensors=(agent_enc, attn_out), dim=-1)\n return self.MLP(concated_attn_result)\n\n\n\n\"\"\"\n network initialize\n\"\"\"\nclass Net(nn.Module):\n def __init__(self, \n rawob_dim, \n n_action):\n super().__init__()\n\n from .foundation import AlgorithmConfig\n\n self.use_normalization = AlgorithmConfig.use_normalization\n self.n_focus_on = AlgorithmConfig.n_focus_on\n self.actor_attn_mod = AlgorithmConfig.actor_attn_mod\n self.dual_conc = AlgorithmConfig.dual_conc\n self.n_entity_placeholder = AlgorithmConfig.n_entity_placeholder\n h_dim = AlgorithmConfig.net_hdim\n\n self.skip_connect = True\n self.n_action = n_action\n self.alternative_critic = AlgorithmConfig.alternative_critic\n self.exp_external_actdim = AlgorithmConfig.exp_external_actdim\n \n # observation normalization\n if self.use_normalization:\n self._batch_norm = DynamicNorm(rawob_dim, only_for_last_dim=True, exclude_one_hot=True, exclude_nan=True)\n\n self.AT_obs_encoder = nn.Sequential(nn.Linear(rawob_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim))\n\n if self.dual_conc:\n self.MIX_conc_core_f = Concentration(\n n_focus_on=self.n_focus_on-1, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n self.MIX_conc_core_h = Concentration(\n n_focus_on=self.n_focus_on, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n else:\n self.MIX_conc_core = Concentration(\n n_focus_on=self.n_focus_on, h_dim=h_dim, \n skip_connect=self.skip_connect, \n skip_connect_dim=rawob_dim, \n adopt_selfattn=self.actor_attn_mod)\n\n if self.exp_external_actdim:\n self.AT_hyper_act_net = nn.Sequential(\n Linear(9, 16),\n nn.ReLU(inplace=True),\n Linear(16, 16),\n nn.ReLU(inplace=True),\n Linear(16, 2)\n )\n tmp_dim = h_dim if not self.dual_conc else h_dim*2\n self.CT_get_value = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n self.CT_get_threat = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n\n if self.alternative_critic:\n self.CT_get_value_alternative_critic = nn.Sequential(Linear(tmp_dim, h_dim), nn.ReLU(inplace=True),Linear(h_dim, 1))\n\n # part\n self.check_n = self.n_focus_on*2\n self.AT_get_logit_db = nn.Sequential( \n nn.Linear(tmp_dim, h_dim), 
nn.ReLU(inplace=True),\n nn.Linear(h_dim, h_dim//2), nn.ReLU(inplace=True),\n LinearFinal(h_dim//2, self.n_action))\n\n self.is_recurrent = False\n self.apply(weights_init)\n return\n\n # two ways to support avail_act, but which one is better?\n def logit2act(self, logits_agent_cluster, eval_mode, test_mode, eval_actions=None, avail_act=None):\n if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf())\n act_dist = Categorical(logits = logits_agent_cluster)\n if not test_mode: act = act_dist.sample() if not eval_mode else eval_actions\n else: act = torch.argmax(act_dist.probs, axis=2)\n def _get_act_log_probs(distribution, action):\n return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1)\n actLogProbs = _get_act_log_probs(act_dist, act) # the policy gradient loss will feedback from here\n # sum up the log prob of all agents\n distEntropy = act_dist.entropy().mean(-1) if eval_mode else None\n return act, actLogProbs, distEntropy, act_dist.probs\n\n\n @Args2tensor_Return2numpy\n def act(self, *args, **kargs):\n act = self._act if self.dual_conc else self._act_singlec\n return act(*args, **kargs)\n\n @Args2tensor\n def evaluate_actions(self, *args, **kargs):\n act = self._act if self.dual_conc else self._act_singlec\n return act(*args, **kargs, eval_mode=True)\n\n # div entity for DualConc models, distincting friend or hostile (present or history)\n def div_entity(self, mat, type=[(0,),# self\n (1, 2, 3, 4), # current\n (5, 6, 7, 8, 9),], # history\n n=10):\n assert n == self.n_entity_placeholder\n if mat.shape[-2]==n:\n tmp = (mat[..., t, :] for t in type)\n elif mat.shape[-1]==n:\n tmp = (mat[..., t] for t in type)\n return tmp\n\n def _act(self, obs, test_mode, eval_mode=False, eval_actions=None, avail_act=None):\n eval_act = eval_actions if eval_mode else None\n others = {}\n if self.use_normalization:\n # print(obs[0,0,0]) # 0 thread, 0 agent, 0 entity\n obs = self._batch_norm(obs)\n mask_dead = torch.isnan(obs).any(-1) # find dead agents\n obs = torch.nan_to_num_(obs, 0) # replace dead agents' obs, from NaN to 0\n v = self.AT_obs_encoder(obs)\n\n zs, ze_f, ze_h = self.div_entity(obs, n=self.n_entity_placeholder)\n vs, ve_f, ve_h = self.div_entity(v, n=self.n_entity_placeholder)\n _, ve_f_dead, ve_h_dead = self.div_entity(mask_dead, n=self.n_entity_placeholder)\n\n # concentration module\n vh_C, vh_M = self.MIX_conc_core_h(vs=vs, ve=ve_h, ve_dead=ve_h_dead, skip_connect_ze=ze_h, skip_connect_zs=zs)\n vf_C, vf_M = self.MIX_conc_core_f(vs=vs, ve=ve_f, ve_dead=ve_f_dead, skip_connect_ze=ze_f, skip_connect_zs=zs)\n\n # fuse forward path\n v_C_fuse = torch.cat((vf_C, vh_C), dim=-1) # (vs + vs + check_n + check_n)\n logits = self.AT_get_logit_db(v_C_fuse) # diverge here\n\n\n # motivation encoding fusion\n v_M_fuse = torch.cat((vf_M, vh_M), dim=-1)\n # motivation objectives\n value = self.CT_get_value(v_M_fuse)\n threat = self.CT_get_threat(v_M_fuse)\n\n assert not self.alternative_critic\n if self.exp_external_actdim:\n act, actLogProbs, distEntropy, probs = self.logit2act_exp(logits, zs=zs, eval_mode=eval_mode, \n test_mode=test_mode, eval_actions=eval_act, avail_act=avail_act)\n\n else:\n act, actLogProbs, distEntropy, probs = self.logit2act(logits, eval_mode=eval_mode, \n test_mode=test_mode, eval_actions=eval_act, avail_act=avail_act)\n\n def re_scale(t):\n SAFE_LIMIT = 11\n r = 1. /2. * SAFE_LIMIT\n return (torch.tanh_(t/r) + 1.) 
* r\n\n others['threat'] = re_scale(threat)\n if not eval_mode: return act, value, actLogProbs\n else: return value, actLogProbs, distEntropy, probs, others\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n @staticmethod\n def _get_act_log_probs(distribution, action):\n return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1)\n\n\n def logit2act_exp(self, logits_agent_cluster, zs, eval_mode, test_mode, eval_actions=None, avail_act=None):\n '''\n logits_agent_detach = logits_agent_cluster.detach()\n zs_detach = zs.detach()\n zs_detach = zs_detach.squeeze(-2)\n hyper_obs = torch.cat((logits_agent_detach, zs_detach), axis=-1)\n hyper_act_logits = self.AT_hyper_act_net(hyper_obs)\n hyper_act_logits[..., 0] = 0\n hyper_act_logits[..., 1] = 10\n hyper_act_logits = hyper_act_logits.detach()\n # logits to acts, \n # input: hyper_act_logits\n # output: h_act\n h_dist = Categorical(logits = hyper_act_logits)\n if not test_mode: h_act = h_dist.sample() if not eval_mode else eval_actions\n else: h_act = torch.argmax(h_dist.probs, axis=2)\n # h_act[:] = 1\n SampleLogProb = self._get_act_log_probs(h_dist, torch.ones_like(h_act))\n hActLogProbsRef = self._get_act_log_probs(h_dist, h_act)\n # hActLogProbs = self._get_act_log_probs(h_dist, h_act)\n if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf())\n '''\n torch.nn.functional.gumbel_softmax()\n\n act_dist = Categorical(logits = logits_agent_cluster)\n act_sample = act_dist.sample() if not eval_mode else eval_actions\n act_argmax = torch.argmax(act_dist.probs, axis=2)\n # 1 是采样, 0 是贪婪\n act = act_sample # h_act: shape=($n_thread, $n_agent)\n # act = torch.where(h_act==1, act_sample, act_argmax) # h_act: shape=($n_thread, $n_agent)\n # sel_argmax = (act_argmax==act).unsqueeze(-1)\n\n actLogProbs01 = self._get_act_log_probs(act_dist, act) # the policy gradient loss will feedback from here\n # if not eval_mode:\n # which_step = zs[0,0,0,0].item()\n # n_thread = h_act.shape[0]\n # sel_hact1 = (h_act==1).sum().item()/n_thread\n # sel_hact0 = (h_act==0).sum().item()/n_thread\n # from config import GlobalConfig\n # GlobalConfig.data_logger.rec(sel_hact0, '%d-hact0'%round(which_step))\n # GlobalConfig.data_logger.rec(sel_hact1, '%d-hact1'%round(which_step))\n\n\n actLogProbs_notargmax = actLogProbs01 + 1\n #actLogProbs_argmax = torch.log( torch.exp(actLogProbs01 + SampleLogProb) + (1-torch.exp(SampleLogProb)) )\n actLogProbs = actLogProbs_notargmax # torch.where(sel_argmax, actLogProbs_argmax, actLogProbs_notargmax)\n\n # sum up the log prob of all agents\n distEntropy = act_dist.entropy().mean(-1) if eval_mode else None\n return act, actLogProbs, distEntropy, act_dist.probs",
"\"\"\"\n Author: Fu Qingxu,CASIA\n Description: deep learning sample manager\n\n\"\"\"\nimport torch\nimport numpy as np\n\nclass DeepLearningPool(object):\n def __init__(self, pool_size, batch_size) -> None:\n super().__init__()\n self.x_batch = None\n self.y_batch = None\n self.size = pool_size\n self.batch_size = batch_size\n\n\n\n def add_and_sample(self, x, y):\n n_sample = x.shape[0]\n assert n_sample > 0\n if self.x_batch is None:\n self.x_batch = np.zeros(shape=(self.size, *x.shape[1:]), dtype=x.dtype)\n self.y_batch = np.zeros(shape=(self.size, *y.shape[1:]), dtype=y.dtype)\n self.current_idx = 0\n self.current_size = 0\n idx = self._get_storage_idx(n_sample)\n self.x_batch[idx] = x\n self.y_batch[idx] = y\n return self._sample()\n\n\n def _get_storage_idx(self, inc=None):\n inc = inc or 1\n if self.current_idx + inc <= self.size:\n idx = np.arange(self.current_idx, self.current_idx + inc)\n self.current_idx += inc\n elif self.current_idx < self.size:\n overflow = inc - (self.size - self.current_idx)\n idx_a = np.arange(self.current_idx, self.size)\n idx_b = np.arange(0, overflow)\n idx = np.concatenate([idx_a, idx_b])\n self.current_idx = overflow\n else:\n idx = np.arange(0, inc)\n self.current_idx = inc\n self.current_size = min(self.size, self.current_size + inc)\n if inc == 1:\n idx = idx[0]\n return idx\n\n def _sample(self):\n idx = np.random.randint(0, self.current_size, self.batch_size)\n return self.x_batch[idx], self.y_batch[idx]\n\n\nif __name__ == '__main__':\n dlp = DeepLearningPool(10, 7)\n res = dlp.add_and_sample(x=np.random.rand(2,2,3),y=np.array([1,2]))\n print(dlp.y_batch,'res',res[1])\n res = dlp.add_and_sample(x=np.random.rand(4,2,3),y=np.array([3,4,5,6]))\n print(dlp.y_batch,'res',res[1])\n res = dlp.add_and_sample(x=np.random.rand(3,2,3),y=np.array([7,8,9]))\n print(dlp.y_batch,'res',res[1])\n res = dlp.add_and_sample(x=np.random.rand(3,2,3),y=np.array([10,11,12]))\n print(dlp.y_batch,'res',res[1])\n res = dlp.add_and_sample(x=np.random.rand(3,2,3),y=np.array([13,14,15]))\n print(dlp.y_batch,'res',res[1])\n res = dlp.add_and_sample(x=np.random.rand(3,2,3),y=np.array([16,17,18]))\n print(dlp.y_batch,'res',res[1])\n print('end of test')\n\n\n \n"
] |
[
[
"torch.distributions.categorical.Categorical",
"torch.nn.functional.softmax",
"torch.nan_to_num_",
"torch.nn.functional.gumbel_softmax",
"torch.nn.init.uniform_",
"torch.Tensor",
"torch.cat",
"torch.nn.modules.linear.Linear",
"torch.isnan",
"torch.nn.ReLU",
"torch.tanh_",
"torch.nan_to_num",
"torch.matmul",
"torch.nn.Linear",
"torch.nn.init.orthogonal_",
"torch.argsort",
"torch.argmax"
],
[
"numpy.arange",
"numpy.concatenate",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KanaCS/transformers
|
[
"d4ba8ec0d56a332fdc66d0339db4dfe1a9af7af0"
] |
[
"src/transformers/models/gpt2/modeling_gpt2.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch OpenAI GPT-2 model.\"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n SequenceClassifierOutputWithPast,\n)\nfrom ...modeling_utils import (\n Conv1D,\n PreTrainedModel,\n SequenceSummary,\n find_pruneable_heads_and_indices,\n prune_conv1d_layer,\n)\nfrom ...utils import logging\nfrom ...utils.model_parallel_utils import assert_device_map, get_device_map\nfrom .configuration_gpt2 import GPT2Config\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GPT2Config\"\n_TOKENIZER_FOR_DOC = \"GPT2Tokenizer\"\n\nGPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"gpt2\",\n \"gpt2-medium\",\n \"gpt2-large\",\n \"gpt2-xl\",\n \"distilgpt2\",\n # See all GPT-2 models at https://huggingface.co/models?filter=gpt2\n]\n\n\ndef load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model\"\"\"\n try:\n import re\n\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(gpt2_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array.squeeze())\n\n for name, array in zip(names, arrays):\n name = name[6:] # skip \"model/\"\n name = name.split(\"/\")\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):\n super().__init__()\n\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\n \"bias\", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.is_cross_attention = is_cross_attention\n if self.is_cross_attention:\n self.c_attn = Conv1D(2 * n_state, nx)\n self.q_attn = Conv1D(n_state, nx)\n else:\n self.c_attn = Conv1D(3 * n_state, nx)\n self.c_proj = Conv1D(n_state, nx)\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.n_head, self.split_size // self.n_head, self.pruned_heads\n )\n index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])\n\n # Prune conv1d layers\n self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)\n self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)\n\n # Update hyper params\n self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))\n self.n_head = self.n_head - len(heads)\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / (float(v.size(-1)) ** 0.5)\n nd, ns = w.size(-2), w.size(-1)\n\n if not self.is_cross_attention:\n # if only \"normal\" attention layer implements causal mask\n mask = self.bias[:, :, ns - nd : ns, :ns]\n w = torch.where(mask.bool(), w, 
self.masked_bias.to(w.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n w = w + attention_mask\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n\n # Mask heads if we want to\n if head_mask is not None:\n w = w * head_mask\n\n outputs = (torch.matmul(w, v),)\n if output_attentions:\n outputs += (w,)\n return outputs\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n if encoder_hidden_states is not None:\n assert hasattr(\n self, \"q_attn\"\n ), \"If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`.\"\n query = self.q_attn(hidden_states)\n key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)\n attention_mask = encoder_attention_mask\n else:\n query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = (key.transpose(-2, -1), value) # transpose to have same shapes\n else:\n present = None\n\n attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)\n a = attn_outputs[0]\n\n a = self.merge_heads(a)\n a = self.c_proj(a)\n a = self.resid_dropout(a)\n\n return (a, present) + attn_outputs[1:] # a, present, (attentions)\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super().__init__()\n hidden_size = config.n_embd\n inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size\n self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.attn = Attention(hidden_size, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n if config.add_cross_attention:\n self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)\n self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.mlp = MLP(inner_dim, config)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n 
use_cache=False,\n output_attentions=False,\n ):\n attn_outputs = self.attn(\n self.ln_1(hidden_states),\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n # residual connection\n hidden_states = attn_output + hidden_states\n\n if encoder_hidden_states is not None:\n # add one self-attention block for cross-attention\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attn_outputs = self.crossattention(\n self.ln_cross_attn(hidden_states),\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n )\n attn_output = cross_attn_outputs[0]\n # residual connection\n hidden_states = hidden_states + attn_output\n outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights\n\n feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))\n # residual connection\n hidden_states = hidden_states + feed_forward_hidden_states\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n\n return outputs # hidden_states, present, (attentions, cross_attentions)\n\n\nclass GPT2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPT2Config\n load_tf_weights = load_tf_weights_in_gpt2\n base_model_prefix = \"transformer\"\n is_parallelizable = True\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass GPT2DoubleHeadsModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of models predicting if two sentences are consecutive or not.\n\n Args:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):\n Language modeling loss.\n mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):\n Multiple choice classification loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n Tuple of length :obj:`config.n_layers`, containing 
tuples of tensors of shape :obj:`(batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n :obj:`past_key_values` input) to speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n mc_loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n mc_logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nGPT2_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nGPT2_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):\n :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else\n ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be\n passed as ``input_ids``.\n\n Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n :obj:`past_key_values` output below). Can be used to speed up sequential decoding. 
The ``input_ids`` which\n have their past given to this model should not be passed as ``input_ids`` as they have already been\n computed.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n\n If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see\n :obj:`past_key_values`).\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\nPARALLELIZE_DOCSTRING = r\"\"\"\n This is an experimental feature and is a subject to change at a moment's notice.\n\n Uses a device map to distribute attention modules of the model across several devices. If no device map is given,\n it will evenly distribute blocks across all devices.\n\n Args:\n device_map (:obj:`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. 
For reference, the gpt2 models have the\n following number of attention modules:\n\n - gpt2: 12\n - gpt2-medium: 24\n - gpt2-large: 36\n - gpt2-xl: 48\n\n Example::\n\n # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:\n model = GPT2LMHeadModel.from_pretrained('gpt2-xl')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],\n\n 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],\n 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}\n model.parallelize(device_map)\n\"\"\"\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to cpu from a model parallel state.\n\n Example::\n\n # On a 4 GPU machine with gpt2-large:\n model = GPT2LMHeadModel.from_pretrained('gpt2-large')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],\n\n 1: [8, 9, 10, 11, 12, 13, 14, 15],\n 2: [16, 17, 18, 19, 20, 21, 22, 23],\n 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n\"\"\"\nclass GPT2Embeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.emb3 = nn.Embedding(config.emb3_size, config.n_embd)\n self.emb4 = nn.Embedding(config.emb4_size, config.n_embd)\n # token type embedding also -> wte\n self.drop = nn.Dropout(config.embd_pdrop)\n # layer norm is in the robertamodel \n \n \n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, emb3_ids=None, emb4_ids=None, inputs_embeds=None\n ):\n # some processing of forward input is done on Model class (not necessary to move here i think?)\n # tok emb + pos emb\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n hidden_states = inputs_embeds + position_embeds\n\n # tok type emb\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n # third emb\n if emb3_ids is not None:\n emb3_embeds = self.emb3(emb3_ids)\n hidden_states = hidden_states + emb3_embeds\n \n # fourth emb\n if emb4_ids is not None:\n emb4_embeds = self.emb4(emb4_ids)\n hidden_states = hidden_states + emb4_embeds\n \n # fith emb\n # dropout \n hidden_states = self.drop(hidden_states)\n return hidden_states\n\n\n\n@add_start_docstrings(\n \"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2Model(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n # NEW\n self.embeddings = GPT2Embeddings(config)\n # NEW\n # self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n # self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n # self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n # Check validity of device_map\n self.device_map = (\n get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map\n )\n 
assert_device_map(self.device_map, len(self.h))\n self.model_parallel = True\n self.first_device = \"cpu\" if \"cpu\" in self.device_map.keys() else \"cuda:\" + str(min(self.device_map.keys()))\n self.last_device = \"cuda:\" + str(max(self.device_map.keys()))\n # self.wte = self.wte.to(self.first_device)\n # self.wpe = self.wpe.to(self.first_device)\n self.embeddings = self.embeddings.to(self.first_device)\n # Load onto devices\n for k, v in self.device_map.items():\n for block in v:\n cuda_device = \"cuda:\" + str(k)\n self.h[block] = self.h[block].to(cuda_device)\n # ln_f to last\n self.ln_f = self.ln_f.to(self.last_device)\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n self.model_parallel = False\n self.device_map = None\n self.first_device = \"cpu\"\n self.last_device = \"cpu\"\n # self.wte = self.wte.to(\"cpu\")\n # self.wpe = self.wpe.to(\"cpu\")\n for index in range(len(self.h)):\n self.h[index] = self.h[index].to(\"cpu\")\n self.ln_f = self.ln_f.to(\"cpu\")\n torch.cuda.empty_cache()\n\n def get_input_embeddings(self):\n return self.embeddings.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.wte = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"gpt2\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = 
position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # If a 2D ou 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.add_cross_attention and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n # if inputs_embeds is None:\n # inputs_embeds = self.wte(input_ids)\n # position_embeds = self.wpe(position_ids)\n # hidden_states = inputs_embeds + position_embeds\n\n # if token_type_ids is not None:\n # token_type_embeds = self.wte(token_type_ids)\n # hidden_states = hidden_states + token_type_embeds\n\n # hidden_states = self.drop(hidden_states)\n \n # NEW \n hidden_states = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n # NEW \n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure layer_past is on same device as hidden_states (might not be correct)\n if layer_past is not None:\n layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if isinstance(head_mask, torch.Tensor):\n head_mask = head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + 
(hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)\n\n # Model Parallel: If it's the last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2LMHeadModel(GPT2PreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n self.device_map = (\n get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))\n if device_map is None\n else device_map\n )\n assert_device_map(self.device_map, len(self.transformer.h))\n self.transformer.parallelize(self.device_map)\n self.lm_head = self.lm_head.to(self.transformer.first_device)\n self.model_parallel = True\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n self.transformer.deparallelize()\n self.transformer = self.transformer.to(\"cpu\")\n self.lm_head = self.lm_head.to(\"cpu\")\n self.model_parallel = False\n torch.cuda.empty_cache()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = 
new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"gpt2\",\n output_type=CausalLMOutputWithCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.transformer.first_device)\n hidden_states = hidden_states.to(self.lm_head.weight.device)\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n cross_attentions=transformer_outputs.cross_attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n\n\n@add_start_docstrings(\n \"\"\"\nThe GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\nRocStories/SWAG tasks. The two heads are two linear layers. 
The language modeling head has its weights tied to the\ninput embeddings, the classification head takes as input the input of a specified classification token index in the\ninput sequence).\n\"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2DoubleHeadsModel(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n config.num_labels = 1\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.multiple_choice_head = SequenceSummary(config)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n mc_token_ids=None,\n labels=None,\n mc_labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):\n Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -\n 1[``.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see\n `input_ids` above)\n\n Return:\n\n Example::\n\n >>> import torch\n >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel\n\n >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')\n\n >>> # Add a [CLS] to the vocabulary (we should train it also!)\n >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})\n\n >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size\n\n >>> choices = [\"Hello, my dog is cute [CLS]\", \"Hello, my cat is cute [CLS]\"]\n >>> encoded_choices = [tokenizer.encode(s) for s in choices]\n >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]\n\n >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2\n >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1\n\n >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)\n >>> lm_logits = outputs.logits\n >>> mc_logits = outputs.mc_logits\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)\n\n mc_loss = None\n if mc_labels is not None:\n loss_fct = CrossEntropyLoss()\n mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))\n lm_loss = None\n if labels is not None:\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits, mc_logits) + transformer_outputs[1:]\n if mc_loss is not None:\n output = (mc_loss,) + output\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return GPT2DoubleHeadsModelOutput(\n loss=lm_loss,\n mc_loss=mc_loss,\n logits=lm_logits,\n mc_logits=mc_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT2 Model transformer with a sequence classification head on top (linear layer).\n\n :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as\n other causal models (e.g. 
GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each\n row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot\n guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take\n the last value in each row of the batch).\n \"\"\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2ForSequenceClassification(GPT2PreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.transformer = GPT2Model(config)\n self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"microsoft/dialogrpt\",\n output_type=SequenceClassifierOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n emb3_ids=None,\n emb4_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n emb3_ids=emb3_ids,\n emb4_ids=emb4_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n\n if input_ids is not None:\n batch_size, sequence_length = input_ids.shape[:2]\n else:\n batch_size, sequence_length = inputs_embeds.shape[:2]\n\n assert (\n self.config.pad_token_id is not None or batch_size == 1\n ), \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be \"\n f\"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n\n pooled_logits = logits[range(batch_size), sequence_lengths]\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n"
] |
[
[
"torch.nn.Softmax",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.from_numpy",
"torch.tensor",
"torch.arange",
"tensorflow.train.list_variables",
"torch.cuda.empty_cache",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.cuda.device_count",
"torch.ne",
"torch.cuda.set_device",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
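The `prepare_inputs_for_generation` method in the row above keeps only the newest token once a cache (`past`) exists and derives `position_ids` from the attention mask during batch generation. A minimal stand-alone sketch of that bookkeeping follows; the helper name and example tensors are hypothetical and not taken from the repository snapshot.

import torch

def next_step_inputs(input_ids, attention_mask, has_past):
    # position ids are the cumulative count of non-padding tokens, minus one;
    # padding positions are clamped to 1, mirroring the modeling code above
    position_ids = attention_mask.long().cumsum(-1) - 1
    position_ids.masked_fill_(attention_mask == 0, 1)
    if has_past:
        # with a key/value cache only the last token is fed to the model
        input_ids = input_ids[:, -1].unsqueeze(-1)
        position_ids = position_ids[:, -1].unsqueeze(-1)
    return input_ids, position_ids

# hypothetical batch: the second sequence is left-padded
input_ids = torch.tensor([[11, 22, 33], [0, 0, 44]])
attention_mask = torch.tensor([[1, 1, 1], [0, 0, 1]])
print(next_step_inputs(input_ids, attention_mask, has_past=True))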
tttom/MacroMax
|
[
"e5f66252befb11e9fd906eb6e1a8a8c5eacf1451",
"e5f66252befb11e9fd906eb6e1a8a8c5eacf1451"
] |
[
"python/macromax/bound.py",
"python/macromax/utils/array/grid.py"
] |
[
"\"\"\"\nThe module provides the abstract :class:`Bound` to represent the boundary of the simulation, e.g. periodic, or\ngradually more absorbing. Specific boundaries are implemented as subclasses and can be used directly as the `bound`\nargument to :func:`macromax.solve` or :class:`macromax.Solution`. The precludes the inclusion of boundaries in the material description.\nIt is sufficient to leave some space for the boundaries.\n\"\"\"\nimport numpy as np\nfrom typing import Union, Sequence, Callable\n\nfrom macromax.utils.array import Grid\n\n\nclass Electric:\n \"\"\" Mixin for Bound to indicate that the electric susceptibility is non-zero.\"\"\"\n pass\n\n\nclass Magnetic:\n \"\"\" Mixin for Bound to indicate that the magnetic susceptibility is non-zero.\"\"\"\n pass\n\n\nclass Bound:\n \"\"\"\n A base class to represent calculation-volume-boundaries.\n Use the sub-classes for practical implementations.\n \"\"\"\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray] = None,\n thickness: Union[float, Sequence, np.ndarray] = 0.0,\n background_permittivity: complex = 1.0):\n \"\"\"\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The thickness as a scalar, vector, or 2d-array (axes x side). Broadcasting is used as necessary.\n :param background_permittivity: The background permittivity of the boundary (default: 1.0 for vacuum). This is\n only used when the absolute permittivity is requested.\n \"\"\"\n if not isinstance(grid, Grid):\n grid = Grid.from_ranges(grid)\n self.__grid = grid\n self.__thickness = np.broadcast_to(thickness, (self.grid.ndim, 2)).astype(float)\n self.__background_permittivity = background_permittivity\n\n @property\n def grid(self):\n \"\"\"The Grid object indicating the uniform Cartesian grid of the entire calculation volume.\"\"\"\n return self.__grid\n\n @property\n def thickness(self) -> np.ndarray:\n \"\"\"\n The thickness as a 2D-array `thickness[axis, front_back]` in meters.\n \"\"\"\n return self.__thickness.copy()\n\n @property\n def background_permittivity(self) -> float:\n \"\"\"A complex scalar indicating the permittivity of the background.\"\"\"\n return self.__background_permittivity\n\n @property\n def electric_susceptibility(self) -> np.ndarray:\n \"\"\"\n The electric susceptibility, chi_E, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return np.zeros(self.grid.shape)\n\n @property\n def permittivity(self) -> np.ndarray:\n \"\"\"\n The electric permittivity, epsilon, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return self.background_permittivity + self.electric_susceptibility\n\n @property\n def magnetic_susceptibility(self) -> np.ndarray:\n \"\"\"\n The magnetic susceptibility, chi_H, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return np.zeros(self.grid.shape)\n\n @property\n def permeability(self) -> np.ndarray:\n \"\"\"\n The magnetic permeability, mu, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n return 1.0 + self.magnetic_susceptibility\n\n @property\n def inside(self) -> np.ndarray:\n \"\"\"Returns a boolean array indicating True for the voxels between the boundaries.\"\"\"\n result = np.asarray(False)\n for axis in range(self.grid.ndim):\n rng = self.grid[axis]\n result = result & (rng[0] + 
self.thickness[axis, 0] <= rng & rng < rng[-1] - self.thickness[axis, 1])\n return result\n\n @property\n def outside(self) -> np.ndarray:\n \"\"\"Returns a boolean array indicating True for the voxels in the boundaries, i.e. outside the area of the calculation.\"\"\"\n return np.logical_not(self.inside)\n\n\nclass PeriodicBound(Bound):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray]):\n \"\"\"\n Constructs an object that represents periodic boundaries.\n \n :param grid: The Grid to which to the boundaries will be applied.\n \"\"\"\n super().__init__(grid=grid, thickness=0.0)\n\n\nclass AbsorbingBound(Bound, Electric):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray], thickness: Union[float, Sequence, np.ndarray] = 0.0,\n extinction_coefficient_function: Union[Callable, Sequence, np.ndarray] = lambda rel_depth: rel_depth,\n background_permittivity: complex = 1.0):\n \"\"\"\n Constructs a boundary with depth-dependent extinction coefficient, kappa(rel_depth).\n\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The boundary thickness(es) in meters. This can be specified as a 2d-array [axis, side].\n Singleton dimensions are broadcast.\n :param extinction_coefficient_function: A function that returns the extinction coefficient as function of\n the depth in the boundary relative to the total thickness of the boundary.\n :param background_permittivity: (default: 1.0 for vacuum)\n \"\"\"\n super().__init__(grid=grid, thickness=thickness, background_permittivity=background_permittivity)\n self.__extinction_coefficient_functions = np.broadcast_to(extinction_coefficient_function, (self.grid.ndim, 2))\n\n @property\n def is_electric(self) -> bool:\n \"\"\"True when this boundary affects the permittivity, extinction coefficient, or complex refractive index.\"\"\"\n return True\n\n @property\n def extinction(self) -> np.ndarray:\n \"\"\"\n Determines the extinction coefficient, kappa, of the boundary on a plaid grid.\n The only non-zero values are found in the boundaries. 
At the corners, the maximum extinction value of the\n overlapping dimensions is returned.\n\n Note that the returned array may have singleton dimensions that must be broadcast!\n\n :return: An nd-array with the extinction coefficient, kappa.\n \"\"\"\n kappa = 0.0\n for axis, rng in enumerate(self.grid):\n for back_side in range(2):\n thickness = self.thickness[axis, back_side] * np.sign(self.grid.step[axis])\n if not back_side:\n new_depth_in_boundary = (rng.ravel()[0] + thickness) - rng\n else:\n new_depth_in_boundary = rng - (rng.ravel()[-1] - thickness)\n new_depth_in_boundary *= np.sign(self.grid.step[axis])\n in_boundary = new_depth_in_boundary > 0\n if np.any(in_boundary):\n rel_depth = in_boundary * new_depth_in_boundary / thickness\n kappa_function = self.__extinction_coefficient_functions[axis, back_side]\n kappa = np.maximum(kappa, kappa_function(rel_depth) * in_boundary)\n return kappa\n\n @property\n def electric_susceptibility(self) -> np.ndarray:\n \"\"\"\n The electric susceptibility, chi_E, at every sample point.\n Note that the returned array may have singleton dimensions that must be broadcast!\n \"\"\"\n n = np.lib.scimath.sqrt(self.background_permittivity)\n epsilon = (n + 1j * self.extinction)**2\n return epsilon - self.background_permittivity\n\n\nclass LinearBound(AbsorbingBound):\n def __init__(self, grid: Union[Grid, Sequence, np.ndarray], thickness: Union[float, Sequence, np.ndarray] = 0.0,\n max_extinction_coefficient: Union[float, Sequence, np.ndarray] = 0.1,\n background_permittivity: complex = 1.0):\n \"\"\"\n Constructs a boundary with linearly increasing extinction coefficient, kappa.\n\n :param grid: The Grid to which to the boundaries will be applied.\n :param thickness: The boundary thickness(es) in meters. This can be specified as a 2d-array [axis, side].\n Singleton dimensions are broadcast.\n :param max_extinction_coefficient: The maximum extinction coefficient, reached at the deepest point of the\n boundary at the edge of the calculation volume.\n :param background_permittivity: (default: 1.0 for vacuum)\n \"\"\"\n # Define a linear function for every axis and every side\n kappa_function = np.vectorize(lambda kappa_max: lambda rel_depth: kappa_max * rel_depth)\\\n (max_extinction_coefficient)\n super().__init__(grid=grid, thickness=thickness,\n extinction_coefficient_function=kappa_function,\n background_permittivity=background_permittivity)\n\n",
"from typing import Union, Sequence\nimport numpy as np\n\nfrom .vector_to_axis import vector_to_axis\nfrom macromax.utils import ft\n\n\nclass Grid(Sequence):\n \"\"\"\n A class representing an immutable uniformly-spaced plaid Cartesian grid and its Fourier Transform.\n\n See also: :class:`MutableGrid`.\n \"\"\"\n def __init__(self, shape=None, step=None, extent=None, first=None, center=None, last=None, include_last=False,\n ndim: int = None,\n flat: Union[bool, Sequence, np.ndarray] = False,\n origin_at_center: Union[bool, Sequence, np.ndarray] = True,\n center_at_index: Union[bool, Sequence, np.ndarray] = True):\n \"\"\"\n Construct an immutable Grid object.\n :param shape: An integer vector array with the shape of the sampling grid.\n :param step: A vector array with the spacing of the sampling grid.\n :param extent: The extent of the sampling grid as shape * step\n :param first: A vector array with the first element for each dimension.\n The first element is the smallest element if step is positive, and the largest when step is negative.\n :param center: A vector array with the center element for each dimension. The center position in the grid is\n rounded to the next integer index unless center_at_index is set to False for that partical axis.\n :param last: A vector array with the last element for each dimension. Unless include_last is set to True for\n the associated dimension, all but the last element is returned when calling self[axis].\n :param include_last: A boolean vector array indicating whether the returned vectors, self[axis], should include\n the last element (True) or all-but-the-last (False)\n :param ndim: A scalar integer indicating the number of dimensions of the sampling space.\n :param flat: A boolean vector array indicating whether the returned vectors, self[axis], should be\n flattened (True) or returned as an open grid (False)\n :param origin_at_center: A boolean vector array indicating whether the origin should be fft-shifted (True)\n or be ifftshifted to the front (False) of the returned vectors for self[axis].\n :param center_at_index: A boolean vector array indicating whether the center of the grid should be rounded to an\n integer index for each dimension. 
If False and the shape has an even number of elements, the next index is used\n as the center, (self.shape / 2).astype(np.int).\n \"\"\"\n # Figure out what dimension is required\n if ndim is None:\n ndim = 0\n if shape is not None:\n ndim = np.maximum(ndim, np.array(shape).size)\n if step is not None:\n ndim = np.maximum(ndim, np.array(step).size)\n if extent is not None:\n ndim = np.maximum(ndim, np.array(extent).size)\n if first is not None:\n ndim = np.maximum(ndim, np.array(first).size)\n if center is not None:\n ndim = np.maximum(ndim, np.array(center).size)\n if last is not None:\n ndim = np.maximum(ndim, np.array(last).size)\n self.__ndim = ndim\n\n def is_vector(value):\n return value is not None and not np.isscalar(value)\n self.__multidimensional = is_vector(shape) or is_vector(step) or is_vector(extent) or \\\n is_vector(first) or is_vector(center) or is_vector(last)\n\n # Convert all input arguments to vectors of length ndim\n shape, step, extent, first, center, last, flat, origin_at_center, include_last, center_at_index = \\\n self.__all_to_ndim(shape, step, extent, first, center, last, flat, origin_at_center, include_last,\n center_at_index)\n\n if shape is None:\n if step is not None:\n if extent is None:\n # Try to work it out from first, center, or last\n if first is not None and last is not None:\n extent = last - first\n elif first is not None and center is not None:\n extent = 2 * (center - first)\n elif center is not None and last is not None:\n extent = 2 * (last - center)\n else:\n raise TypeError('Could not determine Grid.shape. Neither shape, nor extent, nor two out of first, center, and last have been specified.')\n shape = np.round(np.real(self._to_ndim(extent) / self._to_ndim(step))).astype(np.int)\n else:\n raise TypeError('Could not determine Grid.shape. Neither shape, nor step have been specified.')\n shape += include_last\n else:\n shape = np.round(shape).astype(np.int) # Make sure that the shape is integer\n # At this point the shape is not None\n if step is None:\n if extent is not None:\n nb_steps = shape - include_last\n step = extent / nb_steps\n else:\n step = self._to_ndim(1) # Default step size to 1\n # At this point the step is not None\n if center is None:\n if last is not None:\n nb_steps = shape - include_last\n first = last - step * nb_steps\n if first is not None:\n half_shape = shape / 2\n half_shape[center_at_index] = np.floor(half_shape[center_at_index])\n if np.all(center_at_index):\n half_shape = half_shape.astype(np.int)\n center = first + step * half_shape\n else:\n center = self._to_ndim(0) # Center around 0 by default\n # At this point the center is not None\n\n if np.any(shape < 1):\n raise AttributeError(f'shape = {shape}. All input ranges should have at least one element.')\n\n self._shape = shape.astype(int)\n self._step = step.astype((step[0] + center[0]).dtype)\n self._center = center.astype((step[0] + center[0]).dtype)\n self._flat = flat\n self._origin_at_center = origin_at_center\n self.__center_at_index = center_at_index\n\n @staticmethod\n def from_ranges(*ranges: Union[int, float, complex, Sequence, np.ndarray]):\n \"\"\"\n Converts one or more ranges of numbers to a single Grid object representation.\n The ranges can be specified as separate parameters or as a tuple.\n\n :param ranges: one or more ranges of uniformly spaced numbers.\n :return: A Grid object that represents the same ranges.\n \"\"\"\n # Convert slices to range vectors. 
This won't work with infinite slices\n ranges = [(np.arange(rng.start, rng.stop, rng.step) if isinstance(rng, slice) else rng) for rng in ranges]\n ranges = [np.array([rng] if np.isscalar(rng) else rng) for rng in ranges] # Treat a scalar a singleton vector\n if any(_.size < 1 for _ in ranges):\n raise AttributeError('All input ranges should have at least one element.')\n ranges = [(rng.swapaxes(0, axis).reshape(rng.shape[axis], -1)[:, 0] if rng.ndim > 1 else rng)\n for axis, rng in enumerate(ranges)]\n # Work out some properties about the shape and the size of each dimension\n shape = np.array([rng.size for rng in ranges])\n singleton = shape <= 1\n odd = np.mod(shape, 2) == 1\n # Work our what are the first and last elements, which could be at the center\n first = np.array([rng[0] for rng in ranges]) # first when fftshifted, center+ otherwise\n before_center = np.array([rng[int((rng.size - 1) / 2)] for rng in ranges]) # last when ifftshifted, center+ otherwise\n after_center = np.array([rng[-int(rng.size / 2)] for rng in ranges]) # first when ifftshifted, center- otherwise\n last = np.array([rng[-1] for rng in ranges]) # last when fftshifted, center- otherwise\n # The last value is included!\n\n # If it is not monotonous it is ifftshifted\n origin_at_center = np.abs(last - first) >= np.abs(before_center - after_center)\n # Figure out what is the step size and the center element\n extent_m1 = origin_at_center * (last - first) + (1 - origin_at_center) * (before_center - after_center)\n step = extent_m1 / (shape - 1 + singleton) # Note that the step can be a complex number\n center = origin_at_center * (odd * before_center + (1 - odd) * after_center) + (1 - origin_at_center) * first\n\n return Grid(shape=shape, step=step, center=center, flat=False, origin_at_center=origin_at_center)\n\n\n #\n # Grid and array properties\n #\n\n @property\n def ndim(self) -> int:\n \"\"\"The number of dimensions of the space this grid spans.\"\"\"\n return self.__ndim\n\n @property\n def shape(self) -> np.array:\n \"\"\"The number of sample points along each axis of the grid.\"\"\"\n return self._shape\n\n @property\n def step(self) -> np.ndarray:\n \"\"\"The sample spacing along each axis of the grid.\"\"\"\n return self._step\n\n @property\n def center(self) -> np.ndarray:\n \"\"\"The central coordinate of the grid.\"\"\"\n return self._center\n\n @property\n def center_at_index(self) -> np.array:\n \"\"\"\n Boolean vector indicating whether the central coordinate is aligned with a grid point when the number\n of points is even along the associated axis. 
This has no effect when the the number of sample points is odd.\n \"\"\"\n return self.__center_at_index\n\n @property\n def flat(self) -> np.array:\n \"\"\"\n Boolean vector indicating whether self[axis] returns flattened (raveled) vectors (True) or not (False).\n \"\"\"\n return self._flat\n\n @property\n def origin_at_center(self) -> np.array:\n \"\"\"\n Boolean vector indicating whether self[axis] returns ranges that are monotonous (True) or\n ifftshifted so that the central index is the first element of the sequence (False).\n \"\"\"\n return self._origin_at_center\n\n #\n # Conversion methods\n #\n\n @property\n def as_flat(self):\n \"\"\"\n :return: A new Grid object where all the ranges are 1d-vectors (flattened or raveled)\n \"\"\"\n shape, step, center, center_at_index, origin_at_center = \\\n self.shape, self.step, self.center, self.center_at_index, self.origin_at_center\n if not self.multidimensional:\n shape, step, center, center_at_index, origin_at_center = \\\n shape[0], step[0], center[0], center_at_index[0], origin_at_center[0]\n return Grid(shape=shape, step=step, center=center, center_at_index=center_at_index,\n flat=True, origin_at_center=origin_at_center)\n\n @property\n def as_non_flat(self):\n \"\"\"\n :return: A new Grid object where all the ranges are 1d-vectors (flattened or raveled)\n \"\"\"\n shape, step, center, center_at_index, origin_at_center = \\\n self.shape, self.step, self.center, self.center_at_index, self.origin_at_center\n if not self.multidimensional:\n shape, step, center, center_at_index, origin_at_center = \\\n shape[0], step[0], center[0], center_at_index[0], origin_at_center[0]\n return Grid(shape=shape, step=step, center=center, center_at_index=center_at_index,\n flat=False, origin_at_center=origin_at_center)\n\n @property\n def as_origin_at_0(self):\n \"\"\"\n :return: A new Grid object where all the ranges are ifftshifted so that the origin as at index 0.\n \"\"\"\n shape, step, center, center_at_index, flat = self.shape, self.step, self.center, self.center_at_index, self.flat\n if not self.multidimensional:\n shape, step, center, center_at_index, flat = shape[0], step[0], center[0], center_at_index[0], flat[0]\n return Grid(shape=shape, step=step, center=center, center_at_index=center_at_index,\n flat=flat, origin_at_center=False)\n\n @property\n def as_origin_at_center(self):\n \"\"\"\n :return: A new Grid object where all the ranges have the origin at the center index, even when the number of\n elements is odd.\n \"\"\"\n shape, step, center, center_at_index, flat = self.shape, self.step, self.center, self.center_at_index, self.flat\n if not self.multidimensional:\n shape, step, center, center_at_index, flat = shape[0], step[0], center[0], center_at_index[0], flat[0]\n return Grid(shape=shape, step=step, center=center, center_at_index=center_at_index,\n flat=flat, origin_at_center=True)\n\n def swapaxes(self, axes: Union[slice, Sequence, np.array]):\n \"\"\"Reverses the order of the specified axes.\"\"\"\n axes = np.array(axes).flatten()\n all_axes = np.arange(self.ndim)\n all_axes[axes] = axes[::-1]\n return self.transpose(all_axes)\n\n def transpose(self, axes: Union[None, slice, Sequence, np.array]=None):\n \"\"\"Reverses the order of all axes.\"\"\"\n if axes is None:\n axes = np.arange(self.ndim-1, -1, -1)\n return self.project(axes)\n\n def project(self, axes_to_keep: Union[int, slice, Sequence, np.array, None]=None,\n axes_to_remove: Union[int, slice, Sequence, np.array, None] = None):\n \"\"\"\n Removes all but the specified axes 
and reduces the dimensions to the number of specified axes.\n :param axes_to_keep: The indices of the axes to keep.\n :param axes_to_remove: The indices of the axes to remove. Default: None\n :return: A Grid object with ndim == len(axes) and shape == shape[axes].\n \"\"\"\n if axes_to_keep is None:\n axes_to_keep = np.arange(self.ndim)\n elif isinstance(axes_to_keep, slice):\n axes_to_keep = np.arange(self.ndim)[axes_to_keep]\n if np.isscalar(axes_to_keep):\n axes_to_keep = [axes_to_keep]\n axes_to_keep = np.array(axes_to_keep)\n if axes_to_remove is None:\n axes_to_remove = []\n elif isinstance(axes_to_remove, slice):\n axes_to_remove = np.arange(self.ndim)[axes_to_remove]\n if np.isscalar(axes_to_remove):\n axes_to_remove = [axes_to_remove]\n axes_to_keep = np.array([_ for _ in axes_to_keep if _ not in axes_to_remove])\n\n if np.any(axes_to_keep >= self.ndim) or np.any(axes_to_keep < -self.ndim):\n raise IndexError(f\"Axis range {axes_to_keep} requested from a Grid of dimension {self.ndim}.\")\n\n return Grid(shape=self.shape[axes_to_keep], step=self.step[axes_to_keep], center=self.center[axes_to_keep], flat=self.flat[axes_to_keep],\n origin_at_center=self.origin_at_center[axes_to_keep],\n center_at_index=self.center_at_index[axes_to_keep]\n )\n\n #\n # Derived properties\n #\n\n @property\n def first(self) -> np.ndarray:\n \"\"\"\n :return: A vector with the first element of each range\n \"\"\"\n half_shape = self.shape / 2\n half_shape[self.center_at_index] = np.floor(half_shape[self.center_at_index])\n if np.all(np.mod(self.shape[np.logical_not(self.center_at_index)], 2) == 0):\n half_shape = half_shape.astype(np.int)\n return self._center - self.step * half_shape\n\n @property\n def extent(self) -> np.ndarray:\n \"\"\" The spatial extent of the sampling grid. \"\"\"\n return self.shape * self.step\n\n #\n # Sequence methods\n #\n\n @property\n def size(self) -> int:\n \"\"\" The total number of sampling points as an integer scalar. \"\"\"\n return int(np.prod(self.shape))\n\n @property\n def dtype(self):\n \"\"\" The numeric data type for the coordinates. \"\"\"\n return (self.step[0] + self.center[0]).dtype\n\n #\n # Frequency grids\n #\n\n @property\n def f(self):\n \"\"\" The equivalent frequency Grid. \"\"\"\n shape, step, flat = self.shape, 1 / self.extent, self.flat\n if not self.multidimensional:\n shape, step, flat = shape[0], step[0], flat[0]\n\n return Grid(shape=shape, step=step, flat=flat, origin_at_center=False, center_at_index=True)\n\n @property\n def k(self):\n \"\"\" The equivalent k-space Grid. \"\"\"\n return self.f * (2 * np.pi)\n\n #\n # Arithmetic methods\n #\n def __add__(self, term):\n \"\"\" Add a (scalar) offset to the Grid coordinates. 
\"\"\"\n d = self.__dict__\n new_center = self.center + np.asarray(term)\n if not self.multidimensional:\n new_center = new_center[0]\n d['center'] = new_center\n return Grid(**d)\n\n def __mul__(self, factor: Union[int, float, complex, Sequence, np.array]):\n \"\"\"\n Scales all ranges with a factor.\n :param factor: A scalar factor for all dimensions, or a vector of factors, one for each dimension.\n :return: A new scaled Grid object.\n \"\"\"\n if isinstance(factor, Grid):\n raise TypeError(\"A Grid object can't be multiplied with a Grid object.\"\n + \"Use matmul @ to determine the tensor space.\")\n d = self.__dict__\n factor = np.asarray(factor)\n new_step = self.step * factor\n new_center = self.center * factor\n if not self.multidimensional:\n new_step = new_step[0]\n new_center = new_center[0]\n d['step'] = new_step\n d['center'] = new_center\n return Grid(**d)\n\n def __matmul__(self, other):\n \"\"\"\n Determines the Grid spanning the tensor space, with ndim equal to the sum of both ndims.\n :param other: The Grid with the right-hand dimensions.\n :return: A new Grid with ndim == self.ndim + other.ndim.\n \"\"\"\n return Grid(shape=(*self.shape, *other.shape), step=(*self.step, *other.step),\n center=(*self.center, *other.center),\n flat=(*self.flat, *other.flat),\n origin_at_center=(*self.origin_at_center, *other.origin_at_center),\n center_at_index=(*self.center_at_index, *other.center_at_index)\n )\n\n def __sub__(self, term: Union[int, float, complex, Sequence, np.ndarray]):\n \"\"\" Subtract a (scalar) value from all Grid coordinates. \"\"\"\n return self + (- term)\n\n def __truediv__(self, denominator: Union[int, float, complex, Sequence, np.ndarray]):\n \"\"\" Divide the grid coordinates by a value.\n :param denominator: The denominator to divide by.\n :returns A new Grid with the divided coordinates.\n \"\"\"\n return self * (1 / denominator)\n\n def __neg__(self):\n \"\"\" Invert the coordinate values and the direction of the axes. 
\"\"\"\n return self.__mul__(-1)\n\n #\n # iterator methods\n #\n\n def __len__(self) -> int:\n \"\"\"\n The number of axes in this sampling grid.\n Or, the number of elements when this object is not multi-dimensional.\n \"\"\"\n if self.multidimensional:\n return self.ndim\n else:\n return self.shape[0] # Behave as a single Sequence\n\n def __getitem__(self, key: Union[int, slice, Sequence]):\n \"\"\"\n Select one or more axes from a multi-dimensional grid,\n or select elements from a single-dimensional object.\n \"\"\"\n # if self.multidimensional:\n # return self.project(key)\n # else:\n # rng = self.center[0] + self.step[0] * (np.arange(self.shape[0]) - (self.shape[0] / 2).astype(np.int))\n # if not self.__origin_at_center[0]:\n # rng = ft.ifftshift(rng)\n # if not self.flat[0]:\n # rng = vector_to_axis(rng, axis=0, ndim=self.ndim)\n #todo: finish the above to replace project with indexing?\n\n scalar_key = np.isscalar(key)\n indices = np.atleast_1d(np.arange(self.ndim if self.multidimensional else self.shape[0])[key])\n result = []\n for idx in indices:\n axis = idx if self.multidimensional else 0 # Behave as a single Sequence\n\n try:\n c, st, sh = self.center[axis], self.step[axis], self.shape[axis]\n except IndexError as err:\n raise IndexError(f\"Axis range {axis} requested from a Grid of dimension {self.ndim}.\")\n rng = c + st * (np.arange(sh) - (sh / 2).astype(np.int))\n if not self._origin_at_center[axis]:\n rng = ft.ifftshift(rng)\n if not self.flat[axis]:\n rng = vector_to_axis(rng, axis=axis, ndim=self.ndim)\n\n result.append(rng if self.multidimensional else rng[idx])\n\n if scalar_key:\n result = result[0] # Unpack again\n\n return result\n\n def __iter__(self):\n for idx in range(len(self)):\n yield self[idx]\n\n #\n # General object properties\n #\n\n @property\n def __dict__(self):\n shape, step, center, flat, center_at_index, origin_at_center = \\\n self.shape, self.step, self.center, self.flat, self.center_at_index, self.origin_at_center\n if not self.multidimensional:\n shape, step, center, flat, center_at_index, origin_at_center = \\\n shape[0], step[0], center[0], flat[0], center_at_index[0], origin_at_center[0]\n return dict(shape=shape, step=step, center=center, flat=flat,\n center_at_index=center_at_index, origin_at_center=origin_at_center)\n\n @property\n def immutable(self):\n \"\"\" Return a new immutable Grid object. \"\"\"\n return Grid(**self.__dict__)\n\n @property\n def mutable(self):\n \"\"\" Return a new MutableGrid object. \"\"\"\n return MutableGrid(**self.__dict__)\n\n def __repr__(self) -> str:\n core_props = self.__dict__.copy()\n core_props['dtype'] = self.dtype\n # core_props['multidimensional'] = self.multidimensional\n arg_desc = ','.join([f'{k}={repr(v)}' for k, v in core_props.items()])\n return f\"{type(self).__name__}({arg_desc:s})\"\n\n def __eq__(self, other) -> bool:\n \"\"\" Compares two Grid objects. \"\"\"\n return type(self) == type(other) and np.all(self.shape == other.shape) and np.all(self.step == other.step) \\\n and np.all(self.center == other.center) and np.all(self.flat == other.flat) \\\n and np.all(self.center_at_index == other.center_at_index) \\\n and np.all(self.origin_at_center == other.origin_at_center) and self.dtype == other.dtype\n\n def __hash__(self) -> int:\n return hash(repr(self))\n\n #\n # Assorted property\n #\n @property\n def multidimensional(self) -> bool:\n \"\"\" Single-dimensional grids behave as Sequences, multi-dimensional behave as a Sequence of vectors. 
\"\"\"\n return self.__multidimensional\n\n #\n # Protected and private methods\n #\n\n def _to_ndim(self, arg) -> np.array:\n \"\"\"\n Helper method to ensure that all arguments are all numpy vectors of the same length, self.ndim.\n \"\"\"\n if arg is not None:\n arg = np.array(arg).flatten()\n if np.isscalar(arg) or arg.size == 1:\n arg = np.repeat(arg, repeats=self.ndim)\n elif arg.size != self.ndim:\n raise ValueError(\n f\"All input arguments should be scalar or of length {self.ndim}, not {arg.size} as {arg}.\")\n return arg\n\n def __all_to_ndim(self, *args):\n \"\"\"\n Helper method to ensures that all arguments are all numpy vectors of the same length, self.ndim.\n \"\"\"\n return tuple([self._to_ndim(arg) for arg in args])\n\n\nclass MutableGrid(Grid):\n \"\"\"\n A class representing a mutable uniformly-spaced plaid Cartesian grid and its Fourier Transform.\n\n See also: :class:`Grid`\n \"\"\"\n def __init__(self, shape=None, step=None, extent=None, first=None, center=None, last=None, include_last=False,\n ndim: int = None,\n flat: Union[bool, Sequence, np.ndarray] = False,\n origin_at_center: Union[bool, Sequence, np.ndarray] = True,\n center_at_index: Union[bool, Sequence, np.ndarray] = True):\n \"\"\"\n Construct a mutable Grid object.\n\n :param shape: An integer vector array with the shape of the sampling grid.\n :param step: A vector array with the spacing of the sampling grid.\n :param extent: The extent of the sampling grid as shape * step\n :param first: A vector array with the first element for each dimension.\n The first element is the smallest element if step is positive, and the largest when step is negative.\n :param center: A vector array with the center element for each dimension. The center position in the grid is\n rounded to the next integer index unless center_at_index is set to False for that partical axis.\n :param last: A vector array with the last element for each dimension. Unless include_last is set to True for\n the associated dimension, all but the last element is returned when calling self[axis].\n :param include_last: A boolean vector array indicating whether the returned vectors, self[axis], should include\n the last element (True) or all-but-the-last (False)\n :param ndim: A scalar integer indicating the number of dimensions of the sampling space.\n :param flat: A boolean vector array indicating whether the returned vectors, self[axis], should be\n flattened (True) or returned as an open grid (False)\n :param origin_at_center: A boolean vector array indicating whether the origin should be fft-shifted (True)\n or be ifftshifted to the front (False) of the returned vectors for self[axis].\n :param center_at_index: A boolean vector array indicating whether the center of the grid should be rounded to an\n integer index for each dimension. 
If False and the shape has an even number of elements, the next index is used\n as the center, (self.shape / 2).astype(np.int).\n \"\"\"\n super().__init__(shape=shape, step=step, extent=extent, first=first, center=center, last=last,\n include_last=include_last, ndim=ndim, flat=flat, origin_at_center=origin_at_center,\n center_at_index=center_at_index)\n\n @property\n def shape(self) -> np.array:\n return super().shape\n\n @shape.setter\n def shape(self, new_shape: Union[int, Sequence, np.array]):\n if new_shape is not None:\n self._shape = self._to_ndim(new_shape)\n\n @property\n def step(self) -> np.ndarray:\n return super().step\n\n @step.setter\n def step(self, new_step: Union[int, float, Sequence, np.array]):\n self._step = self._to_ndim(new_step)\n self._center = self._center.astype(self.dtype)\n\n @property\n def center(self) -> np.ndarray:\n return super().center\n\n @center.setter\n def center(self, new_center: Union[int, float, Sequence, np.array]):\n self._center = self._to_ndim(new_center).astype(self.dtype)\n\n @property\n def flat(self) -> np.array:\n return super().flat\n\n @flat.setter\n def flat(self, value: Union[bool, Sequence, np.array]):\n self._flat = self._to_ndim(value)\n\n @property\n def origin_at_center(self) -> np.array:\n return super().origin_at_center\n\n @origin_at_center.setter\n def origin_at_center(self, value: Union[bool, Sequence, np.array]):\n self._origin_at_center = self._to_ndim(value)\n\n @property\n def first(self) -> np.ndarray:\n \"\"\"\n :return: A vector with the first element of each range\n \"\"\"\n return super().first\n\n @first.setter\n def first(self, new_first: Union[int, float, Sequence, np.ndarray]):\n self._center = super().center + self._to_ndim(new_first) - self.first\n\n @property\n def dtype(self):\n \"\"\" The numeric data type for the coordinates. \"\"\"\n return (self.step[0] + self.center[0]).dtype\n\n @dtype.setter\n def dtype(self, new_type: dtype):\n \"\"\" Sets the dtype of the range, updating the step and center coordinate.\"\"\"\n self._step = self._step.astype(new_type)\n self._center = self._center.astype(new_type)\n\n def __iadd__(self, number: Union[int, float, complex, Sequence, np.ndarray]):\n self.center += np.asarray(number)\n\n def __imul__(self, number: Union[int, float, complex, Sequence, np.ndarray]):\n self.step *= np.asarray(number)\n self.center *= np.asarray(number)\n\n def __isub__(self, number: Union[int, float, complex, Sequence, np.ndarray]):\n self.center -= np.asarray(number)\n\n def __idiv__(self, number: Union[int, float, complex, Sequence, np.ndarray]):\n self.step /= np.asarray(number)\n self.center /= np.asarray(number)\n\n"
] |
[
[
"numpy.logical_not",
"numpy.asarray",
"numpy.sign",
"numpy.vectorize",
"numpy.broadcast_to",
"numpy.any",
"numpy.zeros",
"numpy.lib.scimath.sqrt"
],
[
"numpy.logical_not",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.all",
"numpy.round",
"numpy.any",
"numpy.floor",
"numpy.isscalar",
"numpy.mod",
"numpy.prod",
"numpy.repeat",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
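The `Grid` class in the row above describes a uniformly spaced range by its shape, step and centre, and exposes a conjugate frequency grid whose step is 1 / extent (see the `Grid.f` property). A minimal numpy sketch of that relationship follows; the helper name and sample values are hypothetical, not taken from the repository snapshot.

import numpy as np

def uniform_range(shape, step=1.0, center=0.0):
    # samples centred on `center`; for an even shape the centre falls on index shape // 2,
    # matching the default center_at_index behaviour described in the docstrings above
    return center + step * (np.arange(shape) - shape // 2)

shape, step = 8, 0.25
x = uniform_range(shape, step)                  # spatial samples, extent = shape * step = 2.0
f = uniform_range(shape, 1.0 / (shape * step))  # conjugate frequency samples, step = 1 / extent
print(x)
print(f)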
Lyuyangdaisy/DS_package
|
[
"ca0f220598ee156028646fbefccde08b2ece62ea",
"ca0f220598ee156028646fbefccde08b2ece62ea",
"ca0f220598ee156028646fbefccde08b2ece62ea",
"ca0f220598ee156028646fbefccde08b2ece62ea"
] |
[
"english/clustering/Kmeans/kmeans.py",
"english/calculator_for_rock/pyroxene/calculator.py",
"chinese/clustering/AP/AP_class.py",
"english/classifier/svm/svm_2/svm_llwr3_1f.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.base import clone\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_excel('4.xlsx')\ndata = df.drop('O7', axis = 1)\nlabels = df['O7']\n\n#The core function, the grid search of unsupervised learning. Please add the required functions on this basis, and finally modify the function name to prevent conflicts\ndef KmeansGridsearch(dmodel, data, param_dict):\n \"\"\"\n dmodel: default model\n data:training data\n labels: real classification\n param_dict: hyperparameter combination dictionary\n \"\"\"\n output_models = []\n # create parameter grid\n # create hyperparametric grid\n param_grid = ParameterGrid(param_dict)\n \n # change the parameter attributes in dbscan according to the param_grid\n # modify the corresponding parameters of DBSCAN object according to the grid hyperparameters, train the model, and get the output data \n for param in param_grid:\n for key, value in param.items():\n setattr(dmodel,key,value)\n dmodel.fit(data)\n model = clone(dmodel)\n output_models.append(model)\n # If you have other data to output, just add it \n return (output_models)\n\n\nkmeans = KMeans()\n# select the parameters to be tested\nkmeans_dict = {'n_clusters':[3,4,5],\n 'init':['k-means++','random']}\noutput = KmeansGridsearch(kmeans,data,kmeans_dict)\n\n# Evaluation criteria for testing\ndef get_marks(estimator, data, name=None):\n \"\"\" To get the score, there are five kinds of actual classification information that are required to know the data set, and there are three kinds that are not required,\n refer to the readme.txt\n \n :param estimator: model\n :param name: initial method\n :param data: feature data set\n \"\"\"\n estimator.fit(data.astype(np.float64))\n print(30 * '*', name, 30 * '*')\n print(\" Model and parameters : \", estimator )\n print(\"Homogeneity Score : \", metrics.homogeneity_score(labels, estimator.labels_))\n print(\"Completeness Score : \", metrics.completeness_score(labels, estimator.labels_))\n print(\"V-Measure Score : \", metrics.v_measure_score(labels, estimator.labels_))\n print(\"Adjusted Rand Score : \", metrics.adjusted_rand_score(labels, estimator.labels_))\n print(\"Adjusted Mutual Info Score: \", metrics.adjusted_mutual_info_score(labels, estimator.labels_))\n print(\"Calinski Harabasz Score: \", metrics.calinski_harabasz_score(data, estimator.labels_))\n print(\"Silhouette Score : \", metrics.silhouette_score(data, estimator.labels_))\n\n# test results\nfor i in range(len(output)):\n get_marks(output[i], data=data, name=\"output\"+ str(i))\n\n# The test results are drawn into images for easy comparison\ndef plotit(estimator, data):\n plt.subplot(3,3,1)\n plt.subplots_adjust(0,0,2,2)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.homogeneity_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Homogeneity Score')\n plt.subplot(3,3,2)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.completeness_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Completeness Score')\n plt.subplot(3,3,3)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.v_measure_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n 
plt.plot(home)\n plt.title('V-Measure Score')\n plt.subplot(3,3,4)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.adjusted_rand_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Adjusted Rand Score')\n plt.subplot(3,3,5)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.adjusted_mutual_info_score(labels, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Adjusted Mutual Info Score')\n plt.subplot(3,3,6)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.calinski_harabasz_score(data, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Calinski Harabasz Score')\n plt.subplot(3,3,7)\n home = []\n for i in range(len(estimator)):\n home.append(metrics.silhouette_score(data, estimator[i].labels_))\n plt.axvline(x=i,linestyle='--',linewidth=1,color='red')\n plt.plot(home)\n plt.title('Silhouette Score')\n\nplotit(output,data)\n\n\n\n\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport time\n\ndef find_num(major_el_name):\n \"\"\"\n Find the number of cations and the number of oxygen atoms of the principal element in the listing\n\n :param major_el_name: Listing of principal elements\n :return: Number of cations and number of oxygen atoms\n \"\"\"\n length = len(major_el_name)\n temp_ion_num = [re.findall('\\d?O', major_el_name[i], re.I) for i in range(length)]\n ion_num = []\n for i in range(length):\n ion_num.extend(temp_ion_num[i])\n for j in range(length):\n ion_num[j] = re.findall('\\d*', ion_num[j])[0]\n if ion_num[j] == '':\n ion_num[j] = 1\n else:\n ion_num[j] = int(ion_num[j])\n\n temp_oxy_num = [re.findall('O\\d?', major_el_name[i], re.I) for i in range(length)]\n oxy_num = []\n for i in range(length):\n oxy_num.extend(temp_oxy_num[i])\n for j in range(length):\n oxy_num[j] = re.findall('\\d*', oxy_num[j])[1]\n if oxy_num[j] == '':\n oxy_num[j] = 1\n else:\n oxy_num[j] = int(oxy_num[j])\n return ion_num, oxy_num\n\ndef find_ion(major_el_name):\n \"\"\"\n Find the cation in the principal element of the listing\n\n :param major_el_name: The name of the main element column\n :return: cations\n \"\"\"\n length = len(major_el_name)\n temp = []\n for i in range(length):\n a = re.findall('[a-zA-Z]{1,2}[\\d*]?', major_el_name[i], re.I)\n temp.append(a[0])\n ion = []\n for i in range(length):\n ion.extend(re.findall('[a-zA-Z]{1,2}', temp[i], re.I))\n return ion\n\ndef rel_mole_weight(ion, ion_num, oxy_num):\n \"\"\"\n Calculating Relative Molecular Weight\n\n :param ion: Each cation\n :param ion_num: Number of cations per cation\n :param oxy_num: The number of oxygen atoms corresponding to each cation\n :return: Relative molecular weight\n \"\"\"\n ion_dict = {'Si':28.085, 'Ti':47.867, 'Al':26.981, 'Cr':51.996, 'Fe':55.845, 'Mn':54.938,\n 'Mg':24.305, 'Ca':40.078, 'Na':22.989, 'K':39.098, 'P':30.974, 'Ni':58.693,\n 'Zn':65.390, 'Li':6.941, 'Zr':91.224, 'V':50.941, 'O':15.999}\n length = len(ion)\n if length != len(ion_num) or length != len(oxy_num):\n raise Exception\n\n relative_molecular_weight = []\n for i in range(length):\n a = ion_dict[ion[i]] * ion_num[i] + ion_dict['O'] * oxy_num[i]\n relative_molecular_weight.append(a)\n return relative_molecular_weight\n\ndef conver_ratio(rmw, oxy_num, mf):\n \"\"\"\n Calculation of conversion factors\n\n :param rmw: Relative molecular weight\n :param mf: Mass fraction of the principal element\n :return: Value of the conversion factor\n \"\"\"\n conversion_ratio = float(6) / sum(np.array(oxy_num) * np.array(mf) / np.array(rmw))\n return conversion_ratio\n\ndef output(cr, rmw, ion_num, mf):\n '''\n Calculate the output y for each cation\n\n :param cr: conversion factor\n :param rmw: Relative molecular weight\n :param ion_num: Number of cations\n :param mf: Mass fraction of the principal element\n :return: Output y of each cation\n '''\n y = cr * np.array(mf) * np.array(ion_num) / np.array(rmw)\n return y\n\ndef projection(index, target, y):\n '''\n Calculation of the projection value of a specific cation in the range of 0 to 1\n\n :param index: Index to the specified cation list\n :param target: List of specified cations\n :param y: Output value of each cation y\n :return: Projected values of specific cations\n '''\n sum = 0\n for i in range(len(target)):\n sum += np.array(y[target[i]])\n # sum = np.array(y[target[0]]) + np.array(y[target[1]]) + np.array(y[target[2]])\n proj = np.array(y[target[index]]) / sum\n return 
proj\n\n\ndef main():\n start_time = time.time()\n print(\"读取文件............\")\n data = pd.read_excel('cal_data_4th.xlsx') # Read the data set\n data.fillna(0, inplace=True) # The interpolation value is zero, there can be no null value\n\n data_columns = list(data.columns)\n # print(\"列名:\", data_columns) # Listing name: principal element\n\n ion_num, oxy_num = find_num(data_columns)\n ion = find_ion(data_columns)\n # print(\"阳离子: \", ion) # Demonstrate cation\n # print(\"阳离子个数: \", ion_num) # Number of cations\n # print(\"氧原子个数: \",oxy_num) # Number of oxygen atoms\n # print(\"维度:\", len(ion), len(ion_num), len(oxy_num)) # Compare whether the latitudes are the same\n\n rmw = rel_mole_weight(ion, ion_num, oxy_num)\n # print(\"相对分子质量:\", np.array(rmw)) # Relative molecular weight\n cr_columns = []\n data_num = data.shape[0]\n for i in range(data_num):\n a = data.iloc[i, :]\n cr = conver_ratio(rmw, oxy_num, a) # Calculation of conversion factors\n cr_columns.append(cr) # Preservation of conversion factors\n\n temp = []\n for j in range(data_num):\n b = data.iloc[j, :]\n y = output(cr_columns[j], rmw, ion_num, b) # Calculate the output value y for each cation\n temp.append(y) # Save output value y\n temp_df = pd.DataFrame(temp) # New DataFrame table to save the output value y\n temp_df.columns = ion # Adds a column name to the DataFrame table with output value y\n # print(temp_df)\n data['换算系数'] = np.array(cr_columns).reshape(-1, 1) # Add a new column [conversion factor] to the original data set [data]\n # print(data['换算系数'])\n # print(data) # Original data set with conversion factors\n new_df = pd.concat([data, temp_df], axis=1) # Merge the DataFrame table of the original dataset with the DataFrame table of the output value y\n # print(new_df) # Data set containing conversion coefficients and y columns of output values for each cation\n\n target = ['Fe', 'Mg', 'Ca'] # Selected cations to be projected\n df1 = new_df[target]\n target_list = []\n for i in range(data_num):\n y = df1.iloc[i, :]\n ls = []\n for j in range(len(target)):\n proj = projection(j, target, y) # Calculation of the projected value of a given cation\n ls.append(proj) # Save projection values\n #print(ls)\n target_list.append(ls)\n target_df = pd.DataFrame(target_list) # New DataFrame table to save projected values\n # print(pd.DataFrame(target_list))\n project_name = [target[i] + '_projected' for i in range(len(target))] # Constructing new listings with projected values\n target_df.columns = project_name # Adds a column name to a DataFrame table that holds the projected values\n final_df = pd.concat([new_df, target_df], axis=1) # Combination of raw data tables with conversion factors and output values and DF tables with stored projection values\n # print(final_df) # The final form we'll need\n\n final_df.to_csv(\"new_cal_data_4th.csv\") # Save the final table as a csv file\n\n end_time = time.time()\n print(\"程序运行时间:{}s\".format(end_time-start_time))\n \n \nif __name__ == '__main__':\n main()\n\n\n\n",
"import pandas as pd\nfrom sklearn.model_selection import GridSearchCV,ParameterGrid\nfrom sklearn.base import clone\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn import metrics\nimport numpy as np\nimport joblib\nimport datetime\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import learning_curve\n\nclass AP:\n def APGridsearch(self,dmodel, data, labels, param_dict):\n \"\"\"\n dmodel: 默认模型\n data:训练数据\n labels: 真实分类\n param_dict: 超参数组合字典\n \"\"\"\n output_models = []\n\n # create parameter grid\n # 构建超参数网格\n param_grid = ParameterGrid(param_dict)\n\n # change the parameter attributes in dbscan according to the param_grid\n # 依据网格超参数修改dbscan object 的对应参数,训练模型,得出输出数据\n for param in param_grid:\n for key, value in param.items():\n setattr(dmodel, key, value)\n\n dmodel.fit(data)\n model = clone(dmodel)\n output_models.append(model)\n # 如果有其他需要输出的数据,继续往里面添加就可以\n return (output_models)\n\n # 评价标准,测试用,非最后的模块化后的功能块\n\n def get_marks(self,estimator, data, labels, name=None):\n \"\"\"获取评分,有五种需要知道数据集的实际分类信息,有三种不需要,参考readme.txt\n\n :param estimator: 模型\n :param name: 初始方法\n :param data: 特征数据集\n \"\"\"\n estimator.fit(data.astype(np.float64))\n print(30 * '*', name, 30 * '*')\n print(\" 模型及参数: \", estimator)\n print(\"Homogeneity Score (均一性): \", metrics.homogeneity_score(labels, estimator.labels_))\n print(\"Completeness Score (完整性): \", metrics.completeness_score(labels, estimator.labels_))\n print(\"V-Measure Score (V量): \", metrics.v_measure_score(labels, estimator.labels_))\n print(\"Adjusted Rand Score (调整后兰德指数): \", metrics.adjusted_rand_score(labels, estimator.labels_))\n print(\"Adjusted Mutual Info Score(调整后的共同信息): \", metrics.adjusted_mutual_info_score(labels, estimator.labels_))\n print(\"Calinski Harabasz Score: (方差比指数) \", metrics.calinski_harabasz_score(data, estimator.labels_))\n print(\"Silhouette Score (轮廓分数): \", metrics.silhouette_score(data, estimator.labels_))\n\n def read_para(self):\n para = pd.read_excel('para.xlsx', header=None, dtype='object')\n dic = para.set_index(0).T.to_dict('list')\n for i in dic:\n dic[i] = [x for x in dic[i] if x == x]\n return dic\n\n def plot_learning_curve(self,model, data, labels):\n train_sizes, train_scores, test_scores = learning_curve(model, data, labels,\n scoring='adjusted_rand_score', cv=5)\n train_scores_mean = np.mean(train_scores, axis=1) # 将训练得分集合按行的到平均值\n train_scores_std = np.std(train_scores, axis=1) # 计算训练矩阵的标准方差\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid() # 背景设置为网格线\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,\n alpha=0.1,\n color='r')\n # plt.fill_between()函数会把模型准确性的平均值的上下方差的空间里用颜色填充。\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,\n color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')\n # 然后用plt.plot()函数画出模型准确性的平均值\n plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross_validation score')\n plt.legend(loc='best') # 显示图例\n plt.show()",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport xlrd\n#import xlwt\nimport csv\nimport codecs\nfrom sklearn import svm\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nkernel_list = ['linear', 'poly', 'rbf', 'sigmoid'] # kernel function list\n\ndef input_float_list():\n \"\"\"\n Converts each element in the list to a real output\n\n :return: real number list\n \"\"\"\n a = input().split()\n b = [float(a[i]) for i in range(len(a))]\n return b\n\ndef read_txt(filename):\n \"\"\"\n read txt file \n\n :param filename: file name\n :return: A list of file contents\n \"\"\"\n pos=[]\n with open(filename, 'r') as file_to_read:\n while True:\n lines = file_to_read.readline()\n if not lines:\n break\n pass\n new_row_list = [float(i) for i in lines.split()]\n pos.append(new_row_list)\n pass\n pass\n return pos\n\ndef para_file_input(pos):\n \"\"\"\n Read the necessary parameters in the file\n\n :param pos: File content list\n :return: Return the required parameters\n \"\"\"\n svm_num = int(pos[0][0])\n kernel_num = int(pos[1][0])\n C = np.linspace(pos[2][0], pos[2][1], int(pos[2][2]))\n gamma = np.linspace(pos[3][0], pos[3][1], int(pos[3][2]))\n degree = pos[4][0]\n coef0 = pos[5][0]\n pre = pos[6][0]\n prepro = pos[7][0]\n return svm_num, kernel_num, C, gamma, degree, coef0, pre, prepro\n\ndef read_xlsx(ncol_ini, ncol_fin, filename): # This function does not check the boundary. 
Please check the boundary value when you use it\n \"\"\"\n read xlsx file data\n\n :param ncol_ini:\n :param ncol_fin:\n :param filename: file name\n :return: data list\n \"\"\"\n data_list = []\n file_object = xlrd.open_workbook(filename)\n sheetnames = file_object.sheet_names()\n sheetwork = file_object.sheet_by_name(sheetnames[0])\n nrows = sheetwork.nrows\n\n for i in range(nrows):\n new_row_list=[]\n for j in range(ncol_ini-1, ncol_fin):\n data = sheetwork.cell_value(i, j)\n new_row_list.append(data)\n data_list.append(new_row_list)\n \n return data_list\n\ndef xlsx_ncol(filename):\n \"\"\"\n Read the xlsx file data label\n\n :param filename: file name\n :return: data list\n \"\"\"\n file_object = xlrd.open_workbook(filename)\n sheetnames = file_object.sheet_names()\n sheetwork = file_object.sheet_by_name(sheetnames[0])\n ncols = sheetwork.ncols\n\n return ncols\n\ndef preprocess(data_list, prepro):\n \"\"\"\n data per-process\n :param data_list: data list\n :param prepro: process method\n :return: processed data list\n \"\"\"\n if prepro == 0:\n data_list_transformed = data_list\n elif prepro == 1:\n data_list_transformed = preprocessing.RobustScaler().fit_transform(data_list) # Standardized data by IQR.Between the quarter and the third quarter sites\n elif prepro == 2:\n data_list_transformed = preprocessing.MinMaxScaler().fit_transform(data_list) # Narrow down to [0, 1]\n elif prepro == 3:\n data_list_transformed = preprocessing.MaxAbsScaler().fit_transform(data_list) #Narrow down to [-1.0, 1.0]\n elif prepro == 4:\n data_list_transformed = preprocessing.StandardScaler().fit_transform(data_list) # The normalized\n return data_list_transformed\n\ndef fit(data_list, label_list, kernel_num, C, gamma, decision_function, degree, coef0, sv_num): # Training data\n\n\n if sv_num == 0:\n svc = svm.SVC(kernel=kernel_list[kernel_num], C=C, gamma=gamma, decision_function_shape=decision_function,\n degree=degree, coef0=coef0, class_weight='balanced', cache_size=500)\n classifier = svc.fit(data_list, label_list)\n elif sv_num == 1:\n linear_svc = svm.LinearSVC(penalty='l2', loss='squared_hinge', tol=gamma,\n C=C, class_weight='balanced', max_iter=100000000)\n classifier = linear_svc.fit(data_list, label_list)\n return classifier\n\ndef predict(classifier, data_list):\n \"\"\"\n predict data\n\n :param classifier: svm model\n :param data_list: data list\n :return: predict result\n \"\"\"\n label_list = classifier.predict(data_list)\n return label_list\n\ndef evaluate(train_label, test_label, tra_label, tes_label, C, gamma, degree, coef0, prepro, decision_function): # 评价,如果嫌麻烦可以自行去掉\n print('C: ', C)\n print('gamma: ', gamma)\n print('degree: ', degree)\n print('coef0: ', coef0)\n print('preprocess: ', prepro)\n print('decision_function_shape: ', decision_function)\n print('accuracy:')\n print('train set:', accuracy_score(train_label, tra_label))\n print('test set:', accuracy_score(test_label, tes_label))\n print('precision:')\n print('train set:', precision_score(train_label, tra_label))\n print('test set:', precision_score(test_label, tes_label))\n print('recall:')\n print('train set:', recall_score(train_label, tra_label))\n print('test set:', recall_score(test_label, tes_label))\n print('roc_auc:')\n print('train set:', roc_auc_score(train_label, tra_label))\n print('test set:', roc_auc_score(test_label, tes_label))\n print('f1:')\n print('train set:', f1_score(train_label, tra_label))\n print('test set:', f1_score(test_label, tes_label))\n\ndef cross_validation(classifier, data_list, 
label_list): # Cross validation method\n scoring = ['accuracy', 'precision', 'recall', 'f1-samples', 'roc_auc']\n scores = cross_val_score(classifier, data_list, label_list, scoring='f1', cv=10)\n print(scores)\n score_mean = scores.mean()\n score_std = scores.std()\n print('score: %0.3f(+/- %0.3f)' % (scores.mean(), scores.std()))\n return score_mean, score_std\n\n\ndef plot_learning_curve(classifier, data_list, label_list, output_png): # Draw a learning curve\n\n train_sizes, train_scores, test_scores = learning_curve(classifier, data_list, label_list, cv=10, n_jobs=-1,\n train_sizes=np.linspace(0.1,1.0,50), scoring='accuracy')\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='training')\n plt.fill_between(train_sizes, train_scores_mean-train_scores_std,\n train_scores_mean+train_scores_std,alpha=0.1, color='r')\n plt.plot(train_sizes, test_scores_mean, 'o-', color='b', label='cross_validation')\n plt.fill_between(train_sizes, test_scores_mean-test_scores_std,\n test_scores_mean+test_scores_std, alpha=0.1, color='b')\n plt.xlabel('train sizes')\n plt.ylabel('f1')\n plt.legend(loc='best')\n # plt.show()\n fig = plt.savefig(output_png)\n return fig\n\n\ndef predict_write_xls(classifier, predict_data_list, output_xls, prepro): # Classification, recommended to write as xls file, otherwise possible error\n data_list_transformed = preprocess(predict_data_list, prepro)\n label_list = classifier.predict(data_list_transformed)\n\n wrong_list = []\n f = codecs.open(output_xls, 'w', 'utf-8')\n writer = csv.writer(f)\n for i in range(len(label_list)):\n # writer.writerow(str(label_list[i]))\n new_row_list = []\n new_row_list.append(label_list[i])\n wrong_list.append(new_row_list)\n\n writer.writerows(wrong_list)\n\n f.close()\n\ndef write_wrong_label(classifier, data_list, label_list, output_xls): # Output sample of mis-classification\n predict_label_list=classifier.predict(data_list)\n\n wrong_list = []\n f = codecs.open(output_xls, 'w', 'utf-8')\n writer = csv.writer(f)\n for i in range(len(data_list)):\n if predict_label_list[i] != label_list[i]:\n new_row_list = []\n new_row_list.append(i+1)\n wrong_list.append(new_row_list)\n\n writer.writerows(wrong_list)\n f.close()\n\ndef max_index(list2):\n \"\"\"\n Look for the maximum's index of two-dimensional list\n\n :param list2: two-dimensional list\n :return: maximum's index\n \"\"\"\n a = np.array(list2)\n m, n = a.shape\n index = int(a.argmax())\n x = int(index / n)\n y = index % n\n return x, y\n\ndef main(argv):\n\n decision_function = 'ovo' # Enter parameters , C and Gamma can enter multiple values, separated by Spaces\n filename = argv[2]\n options = read_txt(argv[1])\n output_png = argv[4]\n output_xls = argv[3]\n svm_num, kernel_num, C, gamma, degree, coef0, pre, prepro = para_file_input(options)\n\n ncols = xlsx_ncol(filename) # Read data and labels\n\n label_list = []\n file_object = xlrd.open_workbook(filename)\n sheetnames = file_object.sheet_names()\n sheetwork = file_object.sheet_by_name(sheetnames[0])\n nrows = sheetwork.nrows\n\n for i in range(nrows):\n label = sheetwork.cell_value(i, 0)\n label_list.append(int(label))\n\n data_list=read_xlsx(2, ncols, filename)\n \n predict_data_list = []\n train_data_list = []\n train_label_list = []\n if pre == 2:\n for i in range(nrows):\n if label_list[i] == 0:\n 
predict_data_list.append(data_list[i])\n else:\n train_data_list.append(data_list[i])\n train_label_list.append(label_list[i])\n\n data_list = train_data_list\n label_list = train_label_list\n\n\n data_list_transformed = preprocess(data_list, prepro)\n\n train_data, test_data, train_label, test_label = train_test_split(data_list_transformed, label_list, test_size=0.1)\n\n scores_mean = [] # start training\n clf_list = []\n for i in range(len(C)):\n new_row_list = []\n new_clf_list = []\n for j in range(len(gamma)):\n classifier = fit(train_data, train_label, kernel_num, C[i], gamma[j],\n decision_function, degree, coef0, svm_num)\n new_clf_list.append(classifier)\n tra_label = classifier.predict(train_data)\n tes_label = classifier.predict(test_data)\n evaluate(train_label, test_label, tra_label, tes_label, C[i],\n gamma[j], degree, coef0, prepro, decision_function)\n score_mean, score_std=cross_validation(classifier, data_list_transformed, label_list)\n new_row_list.append(score_mean)\n clf_list.append(new_clf_list)\n scores_mean.append(new_row_list)\n\n max_C, max_gamma = max_index(scores_mean) # Look for the highest score and its subscripts\n\n classifier = clf_list[max_C][max_gamma] #The optimal model/the model with the highest score\n print('The optimal model/the model with the highest score:')\n tra_label = classifier.predict(train_data)\n tes_label = classifier.predict(test_data)\n evaluate(train_label, test_label, tra_label, tes_label, C[max_C],\n gamma[max_gamma], degree, coef0, prepro, decision_function)\n cross_validation(classifier, data_list_transformed, label_list)\n\n if pre == 2: # classfy new data by using the optimal model \n predict_write_xls(classifier, predict_data_list, output_xls, prepro)\n if pre == 3:\n write_wrong_label(classifier, data_list_transformed, label_list, output_xls)\n\n plot_learning_curve(classifier, train_data, train_label, output_png) # Draw a learning curve\n \nif __name__ == '__main__':\n main(sys.argv)\n"
] |
[
[
"pandas.read_excel",
"matplotlib.pyplot.axvline",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.title",
"sklearn.metrics.silhouette_score",
"sklearn.metrics.v_measure_score",
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.completeness_score",
"matplotlib.pyplot.plot",
"sklearn.base.clone",
"sklearn.model_selection.ParameterGrid",
"matplotlib.pyplot.subplot",
"sklearn.metrics.adjusted_mutual_info_score",
"matplotlib.pyplot.subplots_adjust",
"sklearn.metrics.adjusted_rand_score",
"sklearn.metrics.calinski_harabasz_score"
],
[
"numpy.array",
"pandas.concat",
"pandas.read_excel",
"pandas.DataFrame"
],
[
"matplotlib.pyplot.legend",
"pandas.read_excel",
"sklearn.metrics.silhouette_score",
"sklearn.model_selection.learning_curve",
"sklearn.metrics.v_measure_score",
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.completeness_score",
"matplotlib.pyplot.plot",
"sklearn.base.clone",
"numpy.std",
"sklearn.model_selection.ParameterGrid",
"numpy.mean",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.grid",
"sklearn.metrics.adjusted_mutual_info_score",
"sklearn.metrics.adjusted_rand_score",
"matplotlib.pyplot.show",
"sklearn.metrics.calinski_harabasz_score"
],
[
"matplotlib.pyplot.legend",
"sklearn.metrics.roc_auc_score",
"numpy.linspace",
"sklearn.preprocessing.MaxAbsScaler",
"matplotlib.pyplot.plot",
"numpy.mean",
"sklearn.svm.LinearSVC",
"sklearn.metrics.f1_score",
"sklearn.preprocessing.MinMaxScaler",
"numpy.std",
"sklearn.metrics.precision_score",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.fill_between",
"sklearn.svm.SVC",
"numpy.array",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.RobustScaler",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dsergio/data-modeling
|
[
"eff6a05c63df4cf8192169abdad01ab2b3854958"
] |
[
"python/kmeansDaysSinceStormNumObs.py"
] |
[
"\"\"\"\nAuthor: David Sergio\n\nKMeans Clustering\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n# import matplotlib as plt\nimport matplotlib.pyplot as plt\nfrom numpy import nan\n\nfrom sklearn.cluster import KMeans\n\n\nweather_observation_data_file = \"..\\\\transform\\\\stage6\\\\all_weather_obs_dates.csv\"\nweather_observation_data = pd.read_csv(weather_observation_data_file)\n\nfor col in weather_observation_data.columns: \n\tweather_observation_data[col] = weather_observation_data[col].replace(\"\", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\" \", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\"NA\", nan)\n\tweather_observation_data[col] = weather_observation_data[col].replace(\"?\", nan)\n\n\nquery = \"numberObservations > 0\"\nquery_result = weather_observation_data.query(query)\n\ncol_list = [\"days_since_storm\", \"numberObservations\"]\nquery_result = query_result[col_list]\n\nprint(query_result)\n\nkmeans = KMeans(n_clusters=3).fit(query_result)\ncentroids = kmeans.cluster_centers_\nprint(centroids)\n\nplt.scatter(query_result['days_since_storm'], query_result['numberObservations'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)\nplt.xlabel(\"Days Since Storm\")\nplt.ylabel(\"Number Observations\")\nplt.title(\"KMeans Days Since Storm / Number Observations\")\nplt.savefig(\".\\\\plots\\\\kmeansDaysSinceStormNumObs.png\")"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rupakgoyal/panel-
|
[
"4e1e01e1766ebfc2fc1efb409734fd51efc60c01"
] |
[
"panel/tests/pane/test_vega.py"
] |
[
"from __future__ import absolute_import\n\nimport pytest\n\ntry:\n import altair as alt\nexcept:\n alt = None\naltair_available = pytest.mark.skipif(alt is None, reason=\"requires altair\")\n\nimport numpy as np\n\nfrom panel.models.vega import VegaPlot\nfrom panel.pane import Pane, PaneBase, Vega\n\nblank_schema = {'$schema': ''}\n\nvega_example = {\n 'config': {\n 'mark': {'tooltip': None},\n 'view': {'height': 300, 'width': 400}\n },\n 'data': {'values': [{'x': 'A', 'y': 5},\n {'x': 'B', 'y': 3},\n {'x': 'C', 'y': 6},\n {'x': 'D', 'y': 7},\n {'x': 'E', 'y': 2}]},\n 'mark': 'bar',\n 'encoding': {'x': {'type': 'ordinal', 'field': 'x'},\n 'y': {'type': 'quantitative', 'field': 'y'}},\n '$schema': 'https://vega.github.io/schema/vega-lite/v3.2.1.json'\n}\n\nvega_inline_example = {\n 'config': {\n 'view': {'width': 400, 'height': 300},\n 'mark': {'tooltip': None}},\n 'data': {'name': 'data-2f2c0ff233b8675aa09202457ebe7506',\n 'format': {'property': 'features', 'type': 'json'}},\n 'mark': 'geoshape',\n 'encoding': {\n 'color': {\n 'type': 'quantitative',\n 'field': 'properties.percent_no_internet'\n }\n },\n 'projection': {'type': 'albersUsa'},\n '$schema': 'https://vega.github.io/schema/vega-lite/v3.2.1.json',\n 'datasets': {\n 'data-2f2c0ff233b8675aa09202457ebe7506': {\n 'type': 'FeatureCollection',\n 'features': [\n {'id': '0',\n 'type': 'Feature',\n 'properties': {\n 'name': 'Autauga County, Alabama',\n 'percent_no_internet': 0.2341122827016244,\n 'percent_no_internet_normalized': 0.2589760005042632},\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [[[-86.411786, 32.706342],\n [-86.411786, 32.410587],\n [-86.499417, 32.344863],\n [-86.817079, 32.339387],\n [-86.915664, 32.662526],\n [-86.411786, 32.706342]]]\n }\n }\n ]\n }\n }\n}\n\ndef test_get_vega_pane_type_from_dict():\n assert PaneBase.get_pane_type(vega_example) is Vega\n\n\ndef test_vega_pane(document, comm):\n pane = Pane(vega_example)\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n expected = dict(vega_example, data={})\n\n assert dict(model.data, **blank_schema) == dict(expected, **blank_schema)\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['A', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n point_example = dict(vega_example, mark='point')\n point_example['data']['values'][0]['x'] = 'C'\n pane.object = point_example\n point_example['data'].pop('values')\n assert model.data == point_example\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['C', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n pane._cleanup(model)\n assert pane._models == {}\n\n\ndef test_vega_pane_inline(document, comm):\n pane = Pane(vega_inline_example)\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n assert dict(model.data, **blank_schema) == dict(vega_inline_example, **blank_schema)\n assert model.data_sources == {}\n\n pane._cleanup(model)\n assert pane._models == {}\n \n\ndef altair_example():\n import altair as alt\n data = alt.Data(values=[{'x': 'A', 'y': 5},\n {'x': 'B', 'y': 3},\n {'x': 'C', 'y': 6},\n {'x': 'D', 'y': 7},\n {'x': 'E', 'y': 2}])\n chart = alt.Chart(data).mark_bar().encode(\n x='x:O', # specify ordinal data\n y='y:Q', # specify quantitative data\n )\n return chart\n\n\n@altair_available\ndef test_get_vega_pane_type_from_altair():\n assert 
PaneBase.get_pane_type(altair_example()) is Vega\n\n\n@altair_available\ndef test_altair_pane(document, comm):\n pane = Pane(altair_example())\n\n # Create pane\n model = pane.get_root(document, comm=comm)\n assert isinstance(model, VegaPlot)\n\n expected = dict(vega_example, data={})\n assert dict(model.data, **blank_schema) == dict(expected, **blank_schema)\n\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['A', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n chart = altair_example()\n chart.mark = 'point'\n chart.data.values[0]['x'] = 'C'\n pane.object = chart\n point_example = dict(vega_example, mark='point')\n assert dict(model.data, **blank_schema) == dict(point_example, **blank_schema)\n cds_data = model.data_sources['data'].data\n assert np.array_equal(cds_data['x'], np.array(['C', 'B', 'C', 'D', 'E'])) \n assert np.array_equal(cds_data['y'], np.array([5, 3, 6, 7, 2]))\n\n pane._cleanup(model)\n assert pane._models == {}\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Waziup/SoilMoistureML
|
[
"26c8ec9ff51889d51b8dbd76c26d7b168282b447"
] |
[
"competition/trans.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport pylab\nfrom scipy.signal import argrelextrema\nimport numpy as np\nfrom scipy import signal\n\n\n# Get Sensor observations\ndef getObs(myfile, name):\n\n # read the CSV file, parsing dates and dropping one useless collumn\n obs = pd.read_csv(myfile,\n sep=',',\n parse_dates=[0]).drop('date_received',1);\n \n #Force numerical values in the collumn 'value'\n obs['value'] = obs['value'].apply(pd.to_numeric, errors='coerce')\n\n #Remove duplicates (two measurements with the same time)\n obs2 = obs.drop_duplicates('timestamp', keep='last').set_index('timestamp')\n\n #Read dates with specific format\n obs2.index = pd.to_datetime(obs2.index, format='%Y-%m-%dT%H:%M:%S.%fZ')\n\n #Resample the values: every intermediate values will be averaged and place at the right time\n obs3 = obs2.resample('5min').mean()\n\n #Filling missing values\n obs3['value'].interpolate('time', inplace=True, limit_direction='both')\n\n #rounding all values to 2 decimal places\n obs4 = obs3.round({'value': 2})\n \n #NEw collumn for \"Irrigation\" (on/off)\n iri = name + ' irrigation'\n\n #Find the extremum of the humidity (when the humidity shoots up) => irrigation is on\n obs4[iri] = obs4.iloc[argrelextrema(obs4.value.values, np.less_equal, order=200)[0]]['value']\n\n #replace \"NaN\" with 0 \n obs4[iri] = obs4[iri].fillna(0)\n obs4.loc[obs4[iri] != 0, iri] = 1\n\n obs4.rename(columns={'value': name + ' humidity'}, inplace=True)\n return(obs4);\n\ndef getWeather(myfile, name):\n obs = pd.read_csv(myfile,\n sep=',',\n parse_dates=[4]).drop(['device_id', 'sensor_id', 'date_received'], axis=1);\n \n obs['value'] = obs['value'].apply(pd.to_numeric, errors='coerce')\n obs2 = obs.drop_duplicates('timestamp', keep='last').set_index('timestamp')\n obs2.index = pd.to_datetime(obs2.index, format='%Y-%m-%dT%H:%M:%SZ', errors='coerce')\n obs3 = obs2.resample('5min').mean()\n obs3['value'].interpolate('time', inplace=True, limit_direction='both')\n obs4 = obs3.round({'value': 2})\n obs4.rename(columns={'value': name}, inplace=True)\n return(obs4);\n\n#obs1 = getObs('UGB-PILOTS_Sensor81-SH.csv', 'Soil humidity 1');\n#obs1['obs1 min'] = obs1.soil[(obs1.soil.shift(1) > obs1.soil) & (obs1.soil.shift(-1) > obs1.soil)];\n\n#obs1['obs1 min'] = obs1.iloc[argrelextrema(obs1.soil.values, np.less_equal, order=300)[0]]['Soil humidity 1']\n\n\nobs = [getObs('data/UGB-PILOTS_Sensor80-SH.csv', 'Plot 1'),\n getObs('data/UGB-PILOTS_Sensor81-SH.csv', 'Plot 2'),\n getObs('data/UGB-PILOTS_Sensor82-SH.csv', 'Plot 3'),\n getObs('data/UGB-PILOTS_Sensor84-SH.csv', 'Plot 4'),\n getWeather('data/TP.csv', 'Air temperature (C)'),\n getWeather('data/PA.csv', 'Pressure (KPa)'),\n getWeather('data/WS.csv', 'Wind speed (Km/h)'),\n getWeather('data/WG.csv', 'Wind gust (Km/h)'),\n getWeather('data/WD.csv', 'Wind direction (Deg)'),\n ]\n\nmerged = reduce(lambda x, y: pd.merge(x, y, on = 'timestamp', how='outer'), obs)\n\n#merged = merged.drop(pd.date_range('2018-01-01', '2019-03-12'), errors='ignore')\n#merged = merged[~merged['timestamp'].isin(pd.date_range(start='20150210', end='20190312'))]\n\nmerged.fillna(0)\nmerged = merged.loc['2019-02-23':'2019-06-20']\n#merged = merged.loc['2019-01-01':'2019-06-20']\n\n# Print the first 5 entries\nprint(merged.head(10));\n#print(obs1)\nmerged.to_csv('test2.csv');\n# Make the graphs a bit prettier\n#pd.set_option('display.mpl_style', 'default')\nplt.rcParams['figure.figsize'] = (18, 5)\n#plt.scatter(merged.index, merged['Plot 4 irrigation'])\nplt.plot(merged.index, 
merged[['Plot 3 humidity','Plot 3 irrigation']])\n\n# Plot the first 500 entries with selected columns\n#merged[['Soil humidity 1', 'Soil humidity 2', 'Soil humidity 3', 'Soil humidity 4']].plot();\n#merged[['Soil humidity 2', 'obs1 min']].plot();\nplt.show()\n"
] |
[
[
"pandas.merge",
"pandas.to_datetime",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"scipy.signal.argrelextrema",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
sunqiang85/DASA
|
[
"c4fdc61db77f59f84c68abec3b985fbd7dc29323"
] |
[
"r2r_src/preprocess_mini_dataset.py"
] |
[
"import os\nimport sys\nimport re\nsys.path.append('build')\nimport MatterSim\nimport string\nimport json\nimport time\nimport math\nfrom collections import Counter, defaultdict\nimport numpy as np\nimport networkx as nx\nfrom param import args\nimport torch.nn.functional as F\nfrom param import args\nfrom tqdm import tqdm\n\n\ndef dump_datasets(splits, scan_ids):\n \"\"\"\n\n :param splits: A list of split.\n if the split is \"something@5000\", it will use a random 5000 data from the data\n :return:\n \"\"\"\n import random\n data = []\n old_state = random.getstate()\n for split in splits:\n # It only needs some part of the dataset?\n components = split.split(\"@\")\n number = -1\n if args.mini:\n number = 40\n if len(components) > 1:\n split, number = components[0], int(components[1])\n\n # Load Json\n # if split in ['train', 'val_seen', 'val_unseen', 'test',\n # 'val_unseen_half1', 'val_unseen_half2', 'val_seen_half1', 'val_seen_half2']: # Add two halves for sanity check\n if \"/\" not in split:\n with open('tasks/R2R/data/R2R_%s.json' % split) as f:\n new_data = json.load(f)\n else:\n with open(split) as f:\n new_data = json.load(f)\n\n # Partition\n if number > 0:\n random.seed(0) # Make the data deterministic, additive\n random.shuffle(new_data)\n new_data = new_data[:number]\n\n\n # Join\n data += new_data\n random.setstate(old_state) # Recover the state of the random generator\n print('read data from %s with %d items' % (splits, len(data)))\n\n filter_data = [c for c in new_data if c['scan'] in scan_ids][:100]\n print(\"filter_data\", split, len(filter_data))\n with open('tasks/R2R/mini_data/R2R_%s.json' % split, 'w') as f:\n json.dump(filter_data, f, indent=1)\n return data\n\n\ndef read_img_features(feature_store, scan_ids):\n import csv\n import base64\n from tqdm import tqdm\n\n # print(\"Start loading the image feature\")\n start = time.time()\n csv.field_size_limit(sys.maxsize)\n\n if \"detectfeat\" in args.features:\n views = int(args.features[10:])\n else:\n views = 36\n\n args.views = views\n print(\"input scan_ids\", scan_ids)\n tsv_fieldnames = ['scanId', 'viewpointId', 'image_w', 'image_h', 'vfov', 'features']\n features = {}\n features_index = []\n features_value = []\n with tqdm(total=10567, position=0, leave=True, ascii=True) as pbar:\n pbar.set_description(\"Start loading the image feature\")\n with open(feature_store, \"r\") as tsv_in_file: # Open the tsv file.\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames=tsv_fieldnames)\n for item in reader:\n if item['scanId'] in scan_ids:\n # print(\"item['scanId']\",type(item['scanId']))\n long_id = \"{}_{}\".format(item['scanId'], item['viewpointId'])\n # print(\"long_id\", long_id)\n # print('scan_ids', scan_ids)\n\n features_index.append(long_id)\n ft= np.frombuffer(base64.decodestring(item['features'].encode('ascii')),\n dtype=np.float32).reshape((views, -1))\n features_value.append(ft)\n print(\"len(features\", len(features))\n\n print(\"Finish Loading the image feature from %s in %0.4f seconds\" % (feature_store, time.time() - start))\n np.save(\"tasks/R2R/mini_data/img_feature_index.npy\", features_index)\n np.save(\"tasks/R2R/mini_data/img_feature_value.npy\", features_value)\n index_set = set([c.split('_')[0] for c in features_index])\n print(\"len(index_set)\", len(index_set))\n print(index_set)\n\n\n return features\n\n\ndef dump_depth_features(scan_ids):\n key_array = np.load(args.depth_index_file)\n value_array = np.load(args.depth_value_file)\n\n filtered_keys = []\n filtered_values = []\n 
for key, value in zip(key_array, value_array):\n if key[0] in scan_ids:\n filtered_keys.append(key)\n filtered_values.append(value)\n np.save(\"tasks/R2R/mini_data/viewpointIds.npy\", np.array(filtered_keys))\n np.save(\"tasks/R2R/mini_data/ResNet-152-imagenet-depth.npy\", np.array(filtered_values))\n\n\n\nif __name__ == '__main__':\n print(\"start\")\n scan_map = {}\n total_scan_ids = []\n for split in ['train', 'val_seen', 'val_unseen', 'test', 'aug_paths']:\n with open('tasks/R2R/data/R2R_%s.json' % split) as f:\n new_data = json.load(f)\n scan_map[split] = set([c['scan'] for c in new_data])\n\n for k,v in scan_map.items():\n print(k,len(v))\n scan_ids = list(scan_map[split])[:1]\n dump_datasets([split], scan_ids)\n total_scan_ids = total_scan_ids + scan_ids\n total_scan_ids = list(set(total_scan_ids))\n print(\"len(total_scan_ids)\",len(total_scan_ids))\n print(total_scan_ids)\n\n feature_store = 'img_features/ResNet-152-imagenet.tsv'\n read_img_features(feature_store, total_scan_ids)\n dump_depth_features(total_scan_ids)"
] |
[
[
"numpy.load",
"numpy.array",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RUBAIATH-E-ULFATH/Phishing-URL-Detector-Software-Desktop
|
[
"02fc1522a23421334e548df77df6048dc48ca6a8"
] |
[
"machine learning model/src/inspect_model.py"
] |
[
"from ruba_project_1.src.build_dataset import *\nfrom sklearn.externals import joblib\nimport pickle\nimport numpy as np\n\n\ndef main(url):\n #url = \"http://www.facebook.com/\"\n\n X_test = feature_extract(url)\n print(X_test)\n X_test = (np.array(X_test)).reshape(1, -1)\n\n # Load the model from the file\n rf_from_joblib = joblib.load('C:/Users/Rubaiath/PycharmProjects/PhishingDetectionApp/ruba_project_1/notebook_files/model_rf_1.pkl')\n\n # Use the loaded model to make predictions\n # print(rf_from_joblib.predict(X_test)[0])\n status = rf_from_joblib.predict(X_test)[0]\n\n if status == 1:\n print(\"This is a phising website\")\n else:\n print(\"This is a genuine website\")\n return status, X_test\n\n\nif __name__ == \"__main__\":\n url = \"http://www.facebook.com/\"\n main(url)\n"
] |
[
[
"numpy.array",
"sklearn.externals.joblib.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cns-iu/HuBMAP---Hacking-the-Kidney
|
[
"1a41c887f8edb0b52f5afade384a17dc3d3efec4"
] |
[
"models/1-Tom/train/src/02_train/run.py"
] |
[
"import time\nimport pandas as pd\nimport numpy as np\nimport gc\nfrom os.path import join as opj\nimport matplotlib.pyplot as plt\nimport pickle\nfrom tqdm import tqdm\nimport torchvision\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom dataset import HuBMAPDatasetTrain\nfrom models import build_model\nfrom scheduler import CosineLR\nfrom utils import elapsed_time\nfrom lovasz_loss import lovasz_hinge\nfrom losses import criterion_lovasz_hinge_non_empty\nfrom metrics import dice_sum, dice_sum_2\nfrom get_config import get_config\nconfig = get_config()\n\noutput_path = config['OUTPUT_PATH']\nfold_list = config['FOLD_LIST']\npretrain_path_list = config['pretrain_path_list']\ndevice = config['device']\n\ndef feature_imshow(inp, title=None): \n \"\"\"Imshow for Tensor.\"\"\" \n inp = inp.detach().numpy().transpose((1, 2, 0)) \n # mean = np.array([0.5, 0.5, 0.5]) \n # std = np.array([0.5, 0.5, 0.5])\n MEAN = np.array([0.485, 0.456, 0.406])\n STD = np.array([0.229, 0.224, 0.225])\n # inp = STD * inp + MEAN \n inp = np.clip(inp, 0, 1) \n plt.imshow(inp)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\ndef run(seed, data_df, pseudo_df, trn_idxs_list, val_idxs_list):\n log_cols = ['fold', 'epoch', 'lr',\n 'loss_trn', 'loss_val',\n 'trn_score', 'val_score', \n 'elapsed_time']\n \n criterion = nn.BCEWithLogitsLoss().to(device)\n criterion_clf = nn.BCEWithLogitsLoss().to(device)\n \n for fold, (trn_idxs, val_idxs) in enumerate(zip(trn_idxs_list, val_idxs_list)):\n if fold in fold_list:\n pass\n else:\n continue\n print('seed = {}, fold = {}'.format(seed, fold))\n \n log_df = pd.DataFrame(columns=log_cols, dtype=object)\n log_counter = 0\n\n #dataset\n trn_df = data_df.iloc[trn_idxs].reset_index(drop=True)\n val_df = data_df.iloc[val_idxs].reset_index(drop=True)\n \n #add pseudo label\n if pseudo_df is not None:\n trn_df = pd.concat([trn_df, pseudo_df], axis=0).reset_index(drop=True)\n \n # dataloader\n valid_dataset = HuBMAPDatasetTrain(val_df, config, mode='valid')\n valid_loader = DataLoader(valid_dataset, batch_size=config['test_batch_size'],\n shuffle=False, num_workers=4, pin_memory=True)\n \n #model\n model = build_model(model_name=config['model_name'],\n resolution=config['resolution'], \n deepsupervision=config['deepsupervision'], \n clfhead=config['clfhead'],\n clf_threshold=config['clf_threshold'],\n load_weights=True).to(device, torch.float32)\n # if pretrain_path_list is not None:\n # model.load_state_dict(torch.load(pretrain_path_list[fold]))\n # print(\"pre-trained models loaded\")\n \n# for p in model.parameters():\n# p.requires_grad = True\n \n optimizer = optim.Adam(model.parameters(), **config['Adam'])\n #optimizer = optim.RMSprop(model.parameters(), **config['RMSprop'])\n \n # Creates a GradScaler once at the beginning of training.\n scaler = torch.cuda.amp.GradScaler()\n \n if config['lr_scheduler_name']=='ReduceLROnPlateau':\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **config['lr_scheduler']['ReduceLROnPlateau'])\n elif config['lr_scheduler_name']=='CosineAnnealingLR':\n #scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])\n scheduler = CosineLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])\n elif config['lr_scheduler_name']=='OneCycleLR':\n scheduler = optim.lr_scheduler.OneCycleLR(optimizer, steps_per_epoch=len(train_loader),\n **config['lr_scheduler']['OneCycleLR'])\n \n #training\n val_score_best = -1e+99\n val_score_best2 = -1e+99\n 
loss_val_best = 1e+99\n epoch_best = 0\n counter_ES = 0\n trn_score = 0\n trn_score_each = 0\n start_time = time.time()\n for epoch in range(1, config['num_epochs']+1):\n if epoch < config['restart_epoch_list'][fold]:\n scheduler.step()\n continue\n \n# if elapsed_time(start_time) > config['time_limit']:\n# print('elapsed_time go beyond {} sec'.format(config['time_limit']))\n# break\n \n #print('lr = ', scheduler.get_lr()[0])\n print('lr : ', [ group['lr'] for group in optimizer.param_groups ])\n \n #train\n trn_df['binned'] = trn_df['binned'].apply(lambda x:config['binned_max'] if x>=config['binned_max'] else x)\n n_sample = trn_df['is_masked'].value_counts().min()\n trn_df_0 = trn_df[trn_df['is_masked']==False].sample(n_sample, replace=True)\n trn_df_1 = trn_df[trn_df['is_masked']==True].sample(n_sample, replace=True)\n \n n_bin = int(trn_df_1['binned'].value_counts().mean())\n trn_df_list = []\n for bin_size in trn_df_1['binned'].unique():\n trn_df_list.append(trn_df_1[trn_df_1['binned']==bin_size].sample(n_bin, replace=True))\n trn_df_1 = pd.concat(trn_df_list, axis=0)\n trn_df_balanced = pd.concat([trn_df_1, trn_df_0], axis=0).reset_index(drop=True)\n train_dataset = HuBMAPDatasetTrain(trn_df_balanced, config, mode='train')\n train_loader = DataLoader(train_dataset, batch_size=config['trn_batch_size'],\n shuffle=True, num_workers=4, pin_memory=True, drop_last=True) \n model.train()\n running_loss_trn = 0\n trn_score_numer = 0\n trn_score_denom = 0\n y_preds = []\n y_trues = []\n counter = 0\n tk0 = tqdm(train_loader, total=int(len(train_loader))) \n feature_test = []\n for i,data in enumerate(tk0):\n optimizer.zero_grad()\n with torch.cuda.amp.autocast():\n batch,c,h,w = data['img'].shape\n if config['clfhead']:\n y_clf = data['label'].to(device, torch.float32, non_blocking=True)\n if config['deepsupervision']:\n logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n if config['deepsupervision']:\n logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits = model(data['img'].to(device, torch.float32, non_blocking=True))\n y_true = data['mask'].to(device, torch.float32, non_blocking=True)\n dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(), \n y_true.detach().cpu().numpy(), \n dice_threshold=config['dice_threshold'])\n # print (\"C1\")\n trn_score_numer += dice_numer \n trn_score_denom += dice_denom\n y_true = y_true.unsqueeze(1)\n # get intermediate data\n # print(logits.shape)\n # print(y_true.shape)\n # print(model.x4.shape)\n feature_test.append(model.x4.cpu())#transpose(1,0).cpu()\n #out = torchvision.utils.make_grid(feature_test) \n #print(out.shape)\n #feature_imshow(out)\n # print (\"C2\")\n loss = criterion(logits,y_true)\n loss += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w))\n if config['deepsupervision']:\n for logits_deep in logits_deeps:\n loss += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true)\n if config['clfhead']:\n loss += criterion_clf(logits_clf.squeeze(-1),y_clf)\n # print (\"C3\")\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n #loss.backward()\n #optimizer.step()\n if config['lr_scheduler_name']=='OneCycleLR':\n scheduler.step()\n running_loss_trn += loss.item() * batch\n counter += 1\n # print (\"C5\")\n tk0.set_postfix(loss=(running_loss_trn / (counter * train_loader.batch_size) 
))\n # print (\"C6\")\n epoch_loss_trn = running_loss_trn / len(train_dataset)\n trn_score = trn_score_numer / trn_score_denom\n feature_merge = np.concatenate(feature_test, axis=0)\n print(feature_merge.shape)\n fileObject = open(\"featue_test\", 'wb')\n # print (\"C10\")\n pickle.dump(feature_merge, fileObject)\n # print (\"C7\")\n \n #release GPU memory cache\n del data, loss,logits,y_true\n torch.cuda.empty_cache()\n gc.collect()\n # print (\"C8\")\n\n #eval\n model.eval()\n loss_val = 0\n val_score_numer = 0\n val_score_denom = 0\n # print (\"C9\")\n y_preds = []\n y_trues = []\n tk1 = tqdm(valid_loader, total=int(len(valid_loader)))\n for i,data in enumerate(tk1):\n with torch.no_grad():\n batch,c,h,w = data['img'].shape\n if config['clfhead']:\n y_clf = data['label'].to(device, torch.float32, non_blocking=True)\n if config['deepsupervision']:\n logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n if config['deepsupervision']:\n logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))\n else:\n logits = model(data['img'].to(device, torch.float32, non_blocking=True))\n y_true = data['mask'].to(device, torch.float32, non_blocking=True)\n dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(), \n y_true.detach().cpu().numpy(), \n dice_threshold=config['dice_threshold'])\n val_score_numer += dice_numer \n val_score_denom += dice_denom\n y_true = y_true.unsqueeze(1)\n loss_val += criterion(logits,y_true).item() * batch\n loss_val += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w)).item() * batch\n if config['deepsupervision']:\n for logits_deep in logits_deeps:\n loss_val += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true).item() * batch\n if config['clfhead']:\n loss_val += criterion_clf(logits_clf.squeeze(-1), y_clf).item() * batch\n # print (\"Epoch About Done!\")\n \n \n #release GPU memory cache\n del data,logits,y_true\n torch.cuda.empty_cache()\n gc.collect()\n # print (\"EPoch Done!\")\n loss_val /= len(valid_dataset)\n val_score = val_score_numer / val_score_denom\n \n #logging\n log_df.loc[log_counter,log_cols] = np.array([fold, epoch,\n [ group['lr'] for group in optimizer.param_groups ],\n epoch_loss_trn, loss_val, \n trn_score, val_score,\n elapsed_time(start_time)], dtype='object')\n log_counter += 1\n \n #monitering\n print('epoch {:.0f} loss_trn = {:.5f}, loss_val = {:.5f}, trn_score = {:.4f}, val_score = {:.4f}'.format(epoch, epoch_loss_trn, loss_val, trn_score, val_score))\n if epoch%10 == 0:\n print(' elapsed_time = {:.1f} min'.format((time.time() - start_time)/60))\n \n if config['early_stopping']:\n if loss_val < loss_val_best: #val_score > val_score_best:\n val_score_best = val_score #update\n loss_val_best = loss_val #update\n epoch_best = epoch #update\n counter_ES = 0 #reset\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save\n print('model (best loss) saved')\n else:\n counter_ES += 1\n if counter_ES > config['patience']:\n print('early stopping, epoch_best {:.0f}, loss_val_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))\n break\n else:\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save\n \n if val_score > val_score_best2:\n val_score_best2 = val_score #update\n torch.save(model.state_dict(), 
output_path+f'model_seed{seed}_fold{fold}_bestscore.pth') #save\n print('model (best score) saved')\n \n if config['lr_scheduler_name']=='ReduceLROnPlateau':\n scheduler.step(loss_val)\n #scheduler.step(val_score)\n elif config['lr_scheduler_name']=='CosineAnnealingLR':\n scheduler.step()\n \n #for snapshot ensemble\n if config['lr_scheduler_name']=='CosineAnnealingLR':\n t0 = config['lr_scheduler']['CosineAnnealingLR']['t0']\n if (epoch%(t0+1)==0) or (epoch%(t0)==0) or (epoch%(t0-1)==0):\n torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_epoch{epoch}.pth') #save\n print(f'model saved epoch{epoch} for snapshot ensemble')\n \n #save result\n log_df.to_csv(output_path+f'log_seed{seed}_fold{fold}.csv', index=False)\n\n print('')\n \n #best model\n if config['early_stopping']&(counter_ES<=config['patience']):\n print('epoch_best {:d}, val_loss_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))\n \n del model\n torch.cuda.empty_cache()\n gc.collect()\n \n print('')\n"
] |
[
[
"matplotlib.pyplot.imshow",
"pandas.concat",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.sigmoid",
"numpy.clip",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"pandas.DataFrame",
"torch.cuda.amp.autocast",
"numpy.concatenate",
"torch.cuda.amp.GradScaler",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"numpy.array",
"matplotlib.pyplot.pause"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
BennyZhang-Codes/LDCT-denoising-with-DL-Methods-and-Dicom-Viewer-by-Benny
|
[
"7e1312e8b2846a9a54ca11500db2dd8e305d1a3c"
] |
[
"LDCT_Denoising/Neural_Network/Loss_Func.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport pytorch_ssim\r\n\r\nclass MSE_Loss(nn.Module):\r\n def __init__(self):\r\n super(MSE_Loss, self).__init__()\r\n\r\n def forward(self, input, target):\r\n return F.mse_loss(input, target, reduction='mean')\r\n\r\nclass SSIM_Loss(nn.Module):\r\n def __init__(self):\r\n super(SSIM_Loss, self).__init__()\r\n self.ssim_loss = pytorch_ssim.SSIM()\r\n\r\n def forward(self, input, target):\r\n return -self.ssim_loss(input, target)\r\n"
] |
[
[
"torch.nn.functional.mse_loss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vmware/iot-analytics-benchmark
|
[
"e7fd84af2298cffb85a78e0b3d3bbc342d42f556"
] |
[
"DL/python/send_images_cifar.py"
] |
[
"\"\"\"\nsend_images_cifar.py: sends labeled CIFAR10 images encoded as a string to an inferencing program\n\nUsage: python3 send_images_cifar.py [-h] [-s] [-i IMAGESPERSEC] [-t TOTALIMAGES] | nc <dest IP address> <dest port>\noptional arguments:\n -h, --help show this help message and exit\n -i IMAGESPERSEC, --imagesPerSec IMAGESPERSEC\n -t TOTALIMAGES, --totalImages TOTALIMAGES\n -s, --subtractMean\n\nCIFAR10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html\n(Learning Multiple Layers of Features from Tiny Images, Alex Krizhevsky, 2009, https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf)\n\nCopyright (c) 2019 VMware, Inc.\n\nThis product is licensed to you under the Apache 2.0 license (the \"License\"). You may not use this product except in compliance with the Apache 2.0 License.\n\nThis product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.\n\"\"\"\n\nimport argparse\nimport random\nfrom time import time, gmtime, strftime, sleep, monotonic\nimport sys\nfrom io import StringIO\nimport numpy as np\nfrom math import exp\nfrom keras.datasets import cifar10\n\nparser = argparse.ArgumentParser(description='Send CIFAR10 images encoded as strings')\nparser.add_argument(\"-i\", \"--imagesPerSec\", type=int, dest=\"imagesPerSec\", default=10)\nparser.add_argument(\"-t\", \"--totalImages\", type=int, dest=\"totalImages\", default=100)\nparser.add_argument(\"-s\", \"--subtractMean\", action=\"store_true\", dest=\"subtractMean\")\nargs = parser.parse_args()\nimages_per_second=args.imagesPerSec; total_images=args.totalImages; subtract_mean=args.subtractMean\n\ndef accurate_wait(wait_in_seconds):\n waitUntil = monotonic() + wait_in_seconds\n while (waitUntil > monotonic()):\n pass\n\n# Send stdout to stderr - cifar10.load_data() writes progress to stdout if data not cached locally\ntemp = sys.stdout\nsys.stdout = sys.stderr\nprint(\"%sZ: Loading and normalizing the CIFAR10 data\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime())), file=sys.stderr)\n(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\nsys.stdout = temp\nn_images = test_images.shape[0]\nn_labels = test_labels.shape[0]\n\n# Normalize data.\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.astype('float32') / 255\n\nif subtract_mean:\n train_mean = np.mean(train_images, axis=0)\n test_images -= train_mean\n\n# First, write labeled, unraveled images to a list\nlabeled_images = []\nfor i in range(n_images):\n string = StringIO()\n np.savetxt(string, test_images[i].ravel().reshape(1,3072), fmt='%f') # 3072 = 32x32x3\n # Insert (single character) label in front of string, cut final '\\n' from string\n labeled_images.append(str(test_labels.item(i)) + string.getvalue()[:-1])\n\nprint(\"%sZ: Sending %d images per second for a total of %d images\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime()), images_per_second, total_images), file=sys.stderr, end='')\nif subtract_mean:\n print(\" with pixel mean subtracted\", file=sys.stderr)\nelse:\n print(\"\", file=sys.stderr)\n\nfor i in range(total_images):\n print(labeled_images[i%n_images])\n sys.stdout.flush()\n # Use lognormal distribution to generate a positive random wait time with mean determined from images_per_second and long tail\n mean_wait = float(1.0/images_per_second)\n # Set standard deviation to half the mean_wait\n std_dev = mean_wait/2.0\n 
fudge_factor = .7 # Needed to reduce wait time to compensate for computation/network time - set empirically\n accurate_wait(fudge_factor*mean_wait*random.lognormvariate(mean_wait,std_dev)/exp(mean_wait + std_dev**2/2))\n if (((i+1) % images_per_second == 0) or (i == total_images-1)):\n print(\"%sZ: %d images sent\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime()), i+1), file=sys.stderr)\n\nprint(\"\") # Indicate end of send\nprint(\"%sZ: Image stream ended\" % (strftime(\"%Y-%m-%dT%H:%M:%S\", gmtime())), file=sys.stderr)\n"
] |
[
[
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chivalry/pmdarima
|
[
"83aaa8249fc93b8bc2311431af53d2d10d312eea"
] |
[
"pmdarima/preprocessing/exog/fourier.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom .base import BaseExogFeaturizer\nfrom ..base import UpdatableMixin\nfrom ._fourier import C_fourier_terms\n\n__all__ = ['FourierFeaturizer']\n\nsinpi = (lambda x: np.sin(np.pi * x))\ncospi = (lambda x: np.cos(np.pi * x))\n\n\n# Candidate for cythonization?\ndef _fourier_terms(p, times):\n # X = []\n # for e in p:\n # X.append(sinpi(2 * e * times))\n # X.append(cospi(2 * e * times))\n X = C_fourier_terms(p, times)\n return np.asarray(X).T\n\n\nclass FourierFeaturizer(BaseExogFeaturizer, UpdatableMixin):\n \"\"\"Fourier terms for modeling seasonality\n\n This transformer creates an exogenous matrix containing terms from a\n Fourier series, up to order ``k``. It is based on ``R::forecast code`` [1].\n In practice, it permits us to fit a seasonal time series *without* seasonal\n order (i.e., ``seasonal=False``) by supplying decomposed seasonal Fourier\n terms as an exogenous array.\n\n The advantages of this technique, per Hyndman [2]:\n\n * It allows any length seasonality\n * The seasonal pattern is smooth for small values of K (but more wiggly\n seasonality can be handled by increasing K)\n * The short-term dynamics are easily handled with a simple ARMA error\n\n The disadvantage is that the seasonal periodicity of the time series is\n assumed to be fixed.\n\n Functionally, this is a featurizer. This means that exogenous features are\n *derived* from ``y``, as opposed to transforming an existing exog array.\n It also behaves slightly differently in the :func:`transform` stage than\n most other exogenous transformers in that ``exog`` is not a required arg,\n and it takes ``**kwargs``. See the :func:`transform` docstr for more info.\n\n Parameters\n ----------\n m : int\n The seasonal periodicity of the endogenous vector, y.\n\n k : int, optional (default=None)\n The number of sine and cosine terms (each) to include. I.e., if ``k``\n is 2, 4 new features will be generated. ``k`` must not exceed ``m/2``,\n which is the default value if not set. The value of ``k`` can be\n selected by minimizing the AIC.\n\n Notes\n -----\n * Helpful for long seasonal periods (large ``m``) where ``seasonal=True``\n seems to take a very long time to fit a model.\n\n References\n ----------\n .. [1] https://github.com/robjhyndman/forecast/blob/master/R/season.R\n .. [2] https://robjhyndman.com/hyndsight/longseasonality/\n \"\"\"\n\n def __init__(self, m, k=None):\n self.m = m\n self.k = k\n\n def fit(self, y, exogenous=None):\n \"\"\"Fit the transformer\n\n Computes the periods of all the Fourier terms. The values of ``y`` are\n not actually used; only the periodicity is used when computing Fourier\n terms.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array.\n\n exogenous : array-like or None, shape=(n_samples, n_features), optional\n The exogenous array of additional covariates. If specified, the\n Fourier terms will be column-bound on the right side of the matrix.\n Otherwise, the Fourier terms will be returned as the new exogenous\n array.\n \"\"\"\n # Since we don't fit any params here, we can just check the params\n _, _ = self._check_y_exog(y, exogenous, null_allowed=True)\n\n m = self.m\n k = self.k\n if k is None:\n k = m // 2\n if 2 * k > m or k < 1:\n raise ValueError(\"k must be a positive integer not greater \"\n \"than m//2\")\n\n # Compute the periods of all Fourier terms. 
Since R allows multiple\n # seasonality and we do not, we can do this much more simply.\n p = ((np.arange(k) + 1) / m).astype(np.float64) # 1:K / m\n\n # If sinpi is 0... maybe blow up?\n # if abs(2 * p - round(2 * p)) < np.finfo(y.dtype).eps: # min eps\n\n self.p_ = p\n self.k_ = k\n self.n_ = y.shape[0]\n\n return self\n\n def transform(self, y, exogenous=None, n_periods=0, **_):\n \"\"\"Create Fourier term features\n\n When an ARIMA is fit with an exogenous array, it must be forecasted\n with one also. Since at ``predict`` time in a pipeline we won't have\n ``y`` (and we may not yet have an ``exog`` array), we have to know how\n far into the future for which to compute Fourier terms (hence\n ``n_periods``).\n\n This method will compute the Fourier features for a given frequency and\n ``k`` term. Note that the ``y`` values are not used to compute these,\n so this does not pose a risk of data leakage.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array. This is unused and technically\n optional for the Fourier terms, since it uses the pre-computed\n ``n`` to calculate the seasonal Fourier terms.\n\n exogenous : array-like or None, shape=(n_samples, n_features), optional\n The exogenous array of additional covariates. If specified, the\n Fourier terms will be column-bound on the right side of the matrix.\n Otherwise, the Fourier terms will be returned as the new exogenous\n array.\n\n n_periods : int, optional (default=0)\n The number of periods in the future to forecast. If ``n_periods``\n is 0, will compute the Fourier features for the training set.\n ``n_periods`` corresponds to the number of samples that will be\n returned.\n \"\"\"\n check_is_fitted(self, \"p_\")\n _, exog = self._check_y_exog(y, exogenous, null_allowed=True)\n\n if n_periods and exog is not None:\n if n_periods != exog.shape[0]:\n raise ValueError(\"If n_periods and exog are specified, \"\n \"n_periods must match dims of exogenous \"\n \"({0} != {1})\"\n .format(n_periods, exog.shape[0]))\n\n times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1\n X_fourier = _fourier_terms(self.p_, times)\n\n # Maybe trim if we're in predict mode... in that case, we only keep the\n # last n_periods rows in the matrix we've created\n if n_periods:\n X_fourier = X_fourier[-n_periods:, :]\n\n if exog is None:\n exog = X_fourier\n else:\n exog = np.hstack([exog, X_fourier])\n\n return y, exog\n\n def update_and_transform(self, y, exogenous, **kwargs):\n \"\"\"Update the params and return the transformed arrays\n\n Since no parameters really get updated in the Fourier featurizer, all\n we do is compose forecasts for ``n_periods=len(y)`` and then update\n ``n_``.\n\n Parameters\n ----------\n y : array-like or None, shape=(n_samples,)\n The endogenous (time-series) array.\n\n exogenous : array-like or None, shape=(n_samples, n_features)\n The exogenous array of additional covariates.\n\n **kwargs : keyword args\n Keyword arguments required by the transform function.\n \"\"\"\n check_is_fitted(self, \"p_\")\n\n self._check_endog(y)\n _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs)\n\n # Update this *after* getting the exog features\n self.n_ += len(y)\n return y, Xt\n"
] |
[
[
"numpy.hstack",
"sklearn.utils.validation.check_is_fitted",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.sin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
msieb1/LTCN
|
[
"c9432891327774edf8193e885cc4f10f53fcaa60",
"c9432891327774edf8193e885cc4f10f53fcaa60"
] |
[
"utils/rot_utils_old.py",
"train_pose_euler_crop.py"
] |
[
"\nimport torch\nimport numpy as np\nimport math\nfrom ipdb import set_trace\n\n\n # Checks if a matrix is a valid rotation matrix.\ndef isRotationMatrix(R) :\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype = R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6\n \n\n\ndef norm_sincos(sin, cos):\n stacked_ = torch.cat((sin[None], cos[None]))\n stacked = stacked_ / torch.norm(stacked_)\n return stacked[0], stacked[1]\n\ndef sincos2rotm(a_pred):\n # copy of matlab \n # R = [ cy*cz sy*sx*cz-sz*cx sy*cx*cz+sz*sx \n # cy*sz sy*sx*sz+cz*cx sy*cx*sz-cz*sx \n # -sy cy*sx cy*cx] \n sinx, cosx = norm_sincos(a_pred[0], a_pred[1]) \n siny, cosy = norm_sincos(a_pred[2], a_pred[3]) \n sinz, cosz = norm_sincos(a_pred[4], a_pred[5]) \n r11 = cosy*cosz\n r12 = sinx*siny*cosz - cosx*sinz\n r13 = cosx*siny*cosz + sinx*sinz\n r21 = cosy*sinz\n r22 = sinx*siny*sinz + cosx*cosz\n r23 = cosx*siny*sinz - sinx*cosz\n r31 = -siny\n r32 = sinx*cosy\n r33 = cosx*cosy\n r1 = torch.cat([r11[None],r12[None],r13[None]])\n r2 = torch.cat([r21[None],r22[None],r23[None]])\n r3 = torch.cat([r31[None],r32[None],r33[None]])\n R = torch.stack((r1, r2, r3), dim=0)\n return R \n\ndef axisAngletoRotationMatrix(a):\n v = a[:-1]\n theta = a[-1]\n r11 = 1 + (-v[2]**2 - v[1]**2)*(1-torch.cos(theta)) + 0*torch.sin(theta) \n r12 = (v[0] * v[1])*(1-torch.cos(theta)) - v[2] * torch.sin(theta) \n r13 = (v[0] * v[2])*(1-torch.cos(theta)) + v[1] * torch.sin(theta)\n r21 = (v[0] * v[1])*(1-torch.cos(theta)) + v[2] * torch.sin(theta)\n r22 = 1 + (-v[2]**2 - v[0]**2)*(1-torch.cos(theta)) + 0 * torch.sin(theta)\n r23 = (v[1] * v[2])*(1-torch.cos(theta)) - v[0] * torch.sin(theta)\n r31 = (v[0] * v[2])*(1-torch.cos(theta)) - v[1] * torch.sin(theta)\n r32 = (v[1] * v[2])*(1-torch.cos(theta)) + v[0] * torch.sin(theta)\n r33 = 1 + (-v[1]**2 - v[0]**2)*(1-torch.cos(theta)) + 0 * torch.sin(theta)\n r1 = torch.cat([r11[None],r12[None],r13[None]])\n r2 = torch.cat([r21[None],r22[None],r23[None]])\n r3 = torch.cat([r31[None],r32[None],r33[None]])\n R = torch.stack((r1, r2, r3), dim=0)\n\n return R\n\n\n\n# Calculates Rotation Matrix given euler angles.\ndef eulerAnglesToRotationMatrix(theta, tensor=False) :\n \"\"\"\n Theta is given as euler angles Z-Y-X, corresponding to yaw, pitch, roll\n \"\"\" \n if not tensor:\n R_x = np.array([[1, 0, 0 ],\n [0, math.cos(theta[0]), -math.sin(theta[0]) ],\n [0, math.sin(theta[0]), math.cos(theta[0]) ]\n ])\n \n \n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],\n [0, 1, 0 ],\n [-math.sin(theta[1]), 0, math.cos(theta[1]) ]\n ]) \n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n \n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R\n\n\n \n# Calculates rotation matrix to euler angles\n# The result is the same as MATLAB except the order\n# of the euler angles ( x and z are swapped ).\n# Return X-Y-Z (roll pitch yaw)\ndef rotationMatrixToEulerAngles(R) :\n \n if not R.type() == 'torch.cuda.FloatTensor':\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n \n singular = sy < 1e-6\n \n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n return np.array([x, y, z])\n else:\n sy = torch.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n \n if not singular :\n x = torch.atan2(R[2,1] , R[2,2])\n y = torch.atan2(-R[2,0], sy)\n z = 
torch.atan2(R[1,0], R[0,0])\n else :\n x = torch.atan2(-R[1,2], R[1,1])\n y = torch.atan2(-R[2,0], sy)\n z = 0\n return torch.stack((x, y, z))\n\n# def create_random_rot(tensor=False):\n# \"\"\"\n# vector should be 6 dimensional\n# \"\"\"\n# # random unit vectors\n# u = np.random.rand(3)\n# v = np.random.rand(3)\n# u /= np.linalg.norm(u)\n# v /= np.linalg.norm(v)\n# # subtract (v*u)u from v and normalize\n# v -= v.dot(u)*u\n# v /= np.linalg.norm(v)\n# # build cross product\n# w = np.cross(u, v)\n# w /= np.linalg.norm(w)\n# R = np.hstack([u[:,None], v[:,None], w[:,None]])\n\n# if tensor:\n# return torch.Tensor(R)\n# else:\n# return R\n\n\n\ndef create_rot_from_vector(vector):\n \"\"\"\n vector should be 6 dimensional\n \"\"\"\n # random unit vectors\n u = vector[:3]\n v = vector[3:]\n u /= np.linalg.norm(u)\n v /= np.linalg.norm(v)\n # subtract (v*u)u from v and normalize\n v -= v.dot(u)*u\n v /= np.linalg.norm(v)\n # build cross product\n w = np.cross(u, v)\n w /= np.linalg.norm(w)\n R = np.hstack([u[:,None], v[:,None], w[:,None]])\n return R\n\n\n",
"import matplotlib\nmatplotlib.use('Agg')\nimport os\nfrom os.path import join\nimport argparse\nimport torch\nimport numpy as np\nimport pickle\nimport sys\nimport datetime\nsys.path.append('./utils')\n\nfrom torch import optim\nfrom torch import nn\nfrom torch import multiprocessing\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder\nfrom utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped\nfrom utils.vocabulary import Vocabulary\nfrom ipdb import set_trace\nfrom sklearn.preprocessing import OneHotEncoder\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torchvision import transforms\nimport torchvision.utils as vutils\nimport torchvision.models as models\nfrom torchvision import datasets\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\nfrom shutil import copy2\nimport importlib\nfrom pyquaternion import Quaternion\n\nfrom models.pose_predictor_euler_crop import define_model\nfrom utils.plot_utils import plot_mean\nfrom utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \\\n isRotationMatrix, eulerAnglesToRotationMatrix, \\\n norm_sincos, sincos2rotm\nfrom utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\\\n loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber\nfrom utils.plot_utils import plot_mean\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= \"1,2,3\"\n \nIMAGE_SIZE = (299, 299)\nNUM_VIEWS = 1\nSAMPLE_SIZE = 40\nVAL_SEQS =5\nTRAIN_SEQS_PER_EPOCH = 80\nLOSS_FN = loss_euler_reparametrize\n\nEXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'\nsys.path.append(EXP_ROOT_DIR)\n\nclass Trainer(object):\n def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):\n self.use_cuda = use_cuda\n self.load_model = load_model\n self.model_folder = model_folder\n self.validation_directory = validation_directory\n self.train_directory = train_directory\n self.args = args\n\n self.builder = builder\n self.loss_fn = loss_fn\n self.logdir = join(model_folder, 'logs')\n self.writer = SummaryWriter(self.logdir)\n self.logger = Logger(self.args.log_file)\n self.itr = 0\n\n # Create Model\n self.model = self.create_model()\n if multi_gpu:\n self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))\n\n # Build validation set\n validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)\n validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]\n validation_set = ConcatDataset(validation_set)\n self.len_validation_set = len(validation_set)\n del validation_builder\n self.validation_loader = DataLoader(\n validation_set, \n batch_size=8, \n shuffle=False, \n pin_memory=self.use_cuda,\n )\n self.validation_calls = 0\n # Build Training Set\n self.triplet_builder = builder(self.args.n_views, \\\n train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)\n self.training_queue = multiprocessing.Queue(1)\n dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, 
self.logger), daemon=True)\n dataset_builder_process.start()\n\n # Get Logger\n \n\n # Model specific setup\n # self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9)\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)\n # This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler\n self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')\n # self.criterion = nn.CrossEntropyLoss()\n\n def train(self):\n\n trn_losses_ = []\n val_losses_= []\n val_acc_ = []\n trn_acc_ = []\n\n\n for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):\n print(\"=\" * 20)\n self.logger.info(\"Starting epoch: {0} \".format(epoch))\n\n dataset = self.training_queue.get()\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size),\n shuffle=True,\n pin_memory=self.use_cuda,\n )\n \n train_embedding_features_buffer = []\n train_images_buffer = []\n train_labels = []\n correct = 0\n\n for _ in range(0, 1):\n losses = []\n\n for minibatch in data_loader:\n if self.use_cuda:\n anchor_frames = minibatch[0].cuda()\n #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix\n anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix\n # frames = Variable(minibatch)\n loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)\n losses.append(loss.data.cpu().numpy()) \n correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,))) \n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Add embeddings\n train_labels.append(anchor_quats)\n train_embedding_features_buffer.append(anchor_quats)\n train_images_buffer.append(anchor_frames)\n print(\"logging to {}\".format(self.logdir))\n\n self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)\n self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)\n self.itr += 1 \n trn_losses_.append(np.mean(losses))\n self.logger.info('train loss: ', np.mean(losses))\n self.logger.info(\"Training score correct {correct}/{total}\".format(\n correct=correct,\n total=len(data_loader)\n ))\n trn_acc_.append(correct)\n\n self.writer.add_image('frame_1', minibatch[0][0], self.itr)\n # self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr)\n self.writer.add_image('frame_2', minibatch[0][1], self.itr)\n # self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr) \n self.writer.add_image('frame_3', minibatch[0][2], self.itr)\n # self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr)\n self.writer.add_image('frame_4', minibatch[0][3], self.itr)\n # self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr)\n # Get embeddings\n features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()\n labels = torch.cat(train_labels[:30]).squeeze_()\n # features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1)\n # label = torch.Tensor(np.asarray(label_buffer))\n images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2]\n self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch)\n \n if epoch % 1 == 0:\n loss, correct = 
self.validate()\n self.learning_rate_scheduler.step(loss)\n val_losses_.append(loss)\n val_acc_.append(correct)\n\n if epoch % self.args.save_every == 0 and epoch != 0:\n self.logger.info('Saving model.')\n self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))\n print(\"logging to {}\".format(self.logdir))\n\n plot_mean(trn_losses_, self.model_folder, 'train_loss')\n plot_mean(val_losses_, self.model_folder, 'validation_loss')\n plot_mean(trn_acc_, self.model_folder, 'train_acc')\n plot_mean(val_acc_, self.model_folder, 'validation_accuracy')\n # plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin')\n\n def validate(self):\n # Run model on validation data and log results\n correct = 0\n losses = []\n for minibatch in self.validation_loader:\n if self.use_cuda:\n anchor_frames = minibatch[0].cuda()\n #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix\n anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix\n loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)\n losses.append(loss.data.cpu().numpy())\n correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum()\n\n self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)\n self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)\n\n self.validation_calls += 1\n loss = np.mean(losses)\n self.logger.info(\"Validation score correct {correct}/{total}\".format(\n correct=correct,\n total=self.len_validation_set\n ))\n self.logger.info('val loss: ',loss)\n return loss, correct\n \n\n def model_filename(self, model_name, epoch):\n return \"{model_name}-epoch-{epoch}.pk\".format(model_name=model_name, epoch=epoch)\n\n def save_model(self, model, filename, model_folder):\n ensure_folder(model_folder)\n model_path = os.path.join(model_folder, filename)\n torch.save(model.state_dict(), model_path)\n\n\n def build_set(self, queue, triplet_builder, log):\n while 1:\n datasets = []\n for i in range(TRAIN_SEQS_PER_EPOCH):\n dataset = triplet_builder.build_set()\n datasets.append(dataset)\n dataset = ConcatDataset(datasets)\n # log.info('Created {0} triplets'.format(len(dataset)))\n queue.put(dataset)\n\n def create_model(self):\n model = define_model(pretrained=True)\n # model = PosNet()\n if self.load_model:\n model_path = os.path.join(\n self.model_folder,\n self.load_model\n )\n # map_location allows us to load models trained on cuda to cpu.\n model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n\n if self.use_cuda:\n model = model.cuda()\n return model\n\n def batch_size(self, epoch, max_size):\n exponent = epoch // 100\n return min(max(2 ** (exponent), 2), max_size)\n\ndef main(args):\n # module = importlib.import_module(args.exp_name + '.config')\n # conf = getattr(module, 'Config_Isaac_Server')()\n # EXP_DIR = conf.EXP_DIR\n # MODEL_FOLDER = conf.MODEL_FOLDER\n\n\n # GPU Configuration\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n use_cuda = torch.cuda.is_available()\n\n # Load model\n model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n\n # Get data loader builder and loss function\n builder = getattr(importlib.import_module('utils.builders'), args.builder)\n loss_fn = LOSS_FN\n\n # Define train and validation directories\n 
train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/') \n validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/') \n\n # Copies of executed config\n if not os.path.exists('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments'):\n os.makedirs('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments')\n copy2('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/train_tcn_no_captions.py', model_folder)\n copy2('/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/gps-lfd' + '/config.py', model_folder)\n \n # Build training class\n trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args) \n trainer.train()\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--start-epoch', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--save-every', type=int, default=10)\n parser.add_argument('--load-model', type=str, required=False)\n \n parser.add_argument('--minibatch-size', type=int, default=8)\n parser.add_argument('--model-name', type=str, default='tcn')\n parser.add_argument('--log-file', type=str, default='./out.log')\n parser.add_argument('--lr-start', type=float, default=0.001)\n parser.add_argument('--n-views', type=int, default=NUM_VIEWS)\n parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')\n\n # Model parameters\n \n # Path parameters\n parser.add_argument('--exp-name', type=str, required=True)\n parser.add_argument('--run-name', type=str, required=True)\n parser.add_argument('--builder', type=str, required=True)\n\n args = parser.parse_args()\n print(args)\n\n main(args)\n"
] |
[
[
"numpy.cross",
"numpy.dot",
"numpy.hstack",
"torch.norm",
"torch.cos",
"torch.cat",
"torch.sqrt",
"torch.sin",
"numpy.linalg.norm",
"numpy.identity",
"numpy.transpose",
"torch.stack",
"numpy.array",
"torch.atan2"
],
[
"torch.norm",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.multiprocessing.Queue",
"torch.load",
"torch.cat",
"matplotlib.use",
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"numpy.mean",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.multiprocessing.Process"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yidinghe/machine-learning-100-days
|
[
"3050a5a5fd137316e22814c36ab122f0f7b5aec3"
] |
[
"day-2/Day2_Simple_Linear_Regression.py"
] |
[
"#Step 1: Data Preprocessing\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('../datasets/studentscores.csv')\nX = dataset.iloc[:, : 1].values\nY = dataset.iloc[:, 1 ].values\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/4, random_state = 0)\nprint('X_train')\nprint(X_train)\nprint('X_test')\nprint(X_test)\nprint('Y_train')\nprint(Y_train)\nprint('Y_test')\nprint(Y_test)\n\n#Step 2: LinearRegression \nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor = regressor.fit(X_train, Y_train)\n\n#Step 3: Prediction Outcome\nY_pred = regressor.predict(X_test)\nprint('Y_pred')\nprint(Y_pred)\n\n#Step 4: Visulization\nplt.scatter(X_train, Y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.show()\nplt.scatter(X_test, Y_test, color = 'red')\nplt.plot(X_test, regressor.predict(X_test), color = 'blue')\nplt.show()\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
martinjzhang/scDRS
|
[
"69a9fb4e50dbfa6b1afe0dd222b0d349c5db00eb",
"69a9fb4e50dbfa6b1afe0dd222b0d349c5db00eb"
] |
[
"compute_downstream.py",
"tests/test_CLI.py"
] |
[
"import scanpy as sc\nfrom anndata import read_h5ad\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nimport os\nimport fnmatch\nimport time\nimport argparse\nfrom statsmodels.stats.multitest import multipletests\n\n# Inhouse tools\nimport scdrs.util as util\nimport scdrs.data_loader as dl\nimport scdrs.method as md\n\n\n\"\"\"\n# Fixit\n\n\n# Todo\n- Implement a memory efficient version\n- \"gene_weight\" argument needs to be tested \n\n# Finished\n- Add --n_ctrl (default value 500) \n- Add --cov_file option to regress out covariates stored in COV_FILE before feeding into the score function \n- Add --ctrl_match_opt='mean_var': use mean- and var- matched control genes \n- Change name from scTRS to scdrs (072721)\n- Fixed: Warning for compute_score: Trying to set attribute `.X` of view, copying. (did: v_norm_score = v_raw_score.copy())\n\n\"\"\"\n\nVERSION = \"0.0.1\"\nVERSION = \"beta\"\n\n\ndef main(args):\n sys_start_time = time.time()\n\n MASTHEAD = \"******************************************************************************\\n\"\n MASTHEAD += \"* scDRS downsteam analyses \\n\"\n MASTHEAD += \"* Version %s\\n\" % VERSION\n MASTHEAD += \"* Martin Jinye Zhang and Kangcheng Hou\\n\"\n MASTHEAD += \"* HSPH / Broad Institute / UCLA\\n\"\n MASTHEAD += \"* MIT License\\n\"\n MASTHEAD += \"******************************************************************************\\n\"\n\n ###########################################################################################\n ###### Parse Options ######\n ###########################################################################################\n H5AD_FILE = args.h5ad_file\n SCORE_FILE = args.score_file\n CELLTYPE_LIST = [] if args.cell_type is None else args.cell_type.split(\",\")\n VARIABLE_LIST = [] if args.cell_variable is None else args.cell_variable.split(\",\")\n FLAG_GENE = args.flag_gene == \"True\"\n FLAG_FILTER = args.flag_filter == \"True\"\n FLAG_RAW_COUNT = args.flag_raw_count == \"True\"\n OUT_FOLDER = args.out_folder\n\n header = MASTHEAD\n header += \"Call: ./compute_downstream.py \\\\\\n\"\n header += \"--h5ad_file %s\\\\\\n\" % H5AD_FILE\n header += \"--score_file %s\\\\\\n\" % SCORE_FILE\n header += \"--cell_type %s\\\\\\n\" % args.cell_type\n header += \"--cell_variable %s\\\\\\n\" % args.cell_variable\n header += \"--flag_gene %s\\\\\\n\" % FLAG_GENE\n header += \"--flag_filter %s\\\\\\n\" % FLAG_FILTER\n header += \"--flag_raw_count %s\\\\\\n\" % FLAG_RAW_COUNT\n header += \"--out_folder %s\\n\" % OUT_FOLDER\n print(header)\n\n ###########################################################################################\n ###### Load data ######\n ###########################################################################################\n print(\"Load data:\")\n\n # Load .h5ad file\n adata = read_h5ad(H5AD_FILE)\n if FLAG_FILTER:\n sc.pp.filter_cells(adata, min_genes=250)\n sc.pp.filter_genes(adata, min_cells=50)\n if FLAG_RAW_COUNT:\n sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n sc.pp.log1p(adata)\n print(\n \"--h5ad_file loaded: n_cell=%d, n_gene=%d (sys_time=%0.1fs)\"\n % (adata.shape[0], adata.shape[1], time.time() - sys_start_time)\n )\n\n # Check CELLTYPE_LIST and VARIABLE_LIST\n temp_list = [x for x in CELLTYPE_LIST + VARIABLE_LIST if x not in adata.obs.columns]\n if len(temp_list) > 0:\n raise ValueError(\n \"Following columns not in adata.obs.columns: %s\" % \",\".join(temp_list)\n )\n else:\n print(\"cell_type and cell_variable are in adata.obs.columns\")\n\n # Load score file\n 
score_file_pattern = SCORE_FILE.split(os.path.sep)[-1]\n score_dir = SCORE_FILE.replace(os.path.sep + score_file_pattern, \"\")\n score_file_list = [\n x\n for x in os.listdir(score_dir)\n if fnmatch.fnmatch(x, score_file_pattern.replace(\"@\", \"*\"))\n ]\n print(\"Infer score_dir=%s\" % score_dir)\n print(\"Find %s score_files: %s\" % (len(score_file_list), \",\".join(score_file_list)))\n dic_score = {}\n for score_file in score_file_list:\n temp_df = pd.read_csv(\n score_dir + os.path.sep + score_file, sep=\"\\t\", index_col=0\n )\n n_cell_overlap = len(set(adata.obs_names) & set(temp_df.index))\n if n_cell_overlap < 0.1 * adata.shape[0]:\n print(\n \"WARNING: %s skipped, %d/%d cells in adata\"\n % (score_file, n_cell_overlap, adata.shape[0])\n )\n else:\n dic_score[score_file.replace(\".full_score.gz\", \"\")] = temp_df.copy()\n\n print(\n \"--score_file loaded: n_trait=%d, (sys_time=%0.1fs)\"\n % (len(dic_score), time.time() - sys_start_time)\n )\n print(\"\")\n\n ###########################################################################################\n ###### Computation ######\n ###########################################################################################\n STR_ANALYSIS = \"Perform downstream analyses:\"\n i = 1\n for ct in CELLTYPE_LIST:\n STR_ANALYSIS += \"\\n%d. Cell type-level analysis using %s\" % (i, ct)\n STR_ANALYSIS += \": results in @.scdrs_ct.%s\" % (ct)\n i += 1\n if len(VARIABLE_LIST) > 0:\n STR_ANALYSIS += \"\\n%d. Variable-disease correlation analysis for (%s)\" % (\n i,\n \",\".join(VARIABLE_LIST),\n )\n STR_ANALYSIS += \": results in @.scdrs_var\"\n i += 1\n if FLAG_GENE is True:\n STR_ANALYSIS += \"\\n%d. Disease gene prioritization\" % i\n STR_ANALYSIS += \": results in @.scdrs_gene\"\n print(STR_ANALYSIS)\n\n # Compute connectivities if need to do cell type-level analysis\n if (len(CELLTYPE_LIST) > 0) & (\"connectivities\" not in adata.obsp):\n sc.pp.pca(adata, n_comps=20)\n sc.pp.neighbors(adata, n_neighbors=15, n_pcs=20)\n print(\n \"Compute connectivities with `sc.pp.neighbors` because `connectivities` is not found in adata.obsp\"\n )\n\n # A separate file for each trait\n for trait in dic_score.keys():\n cell_list = sorted(set(adata.obs_names) & set(dic_score[trait].index))\n control_list = [\n x for x in dic_score[trait].columns if x.startswith(\"ctrl_norm_score\")\n ]\n n_ctrl = len(control_list)\n df_reg = adata.obs.loc[cell_list, CELLTYPE_LIST + VARIABLE_LIST].copy()\n df_reg = df_reg.join(\n dic_score[trait].loc[cell_list, [\"norm_score\"] + control_list]\n )\n\n # Cell type-disease analysis: association+heterogeneity\n for ct_col in CELLTYPE_LIST:\n ct_list = sorted(set(adata.obs[ct_col]))\n col_list = [\n \"n_cell\",\n \"n_ctrl\",\n \"assoc_mcp\",\n \"assoc_mcz\",\n \"hetero_mcp\",\n \"hetero_mcz\",\n ]\n df_res = pd.DataFrame(index=ct_list, columns=col_list, dtype=np.float32)\n # Basic info\n for ct in ct_list:\n ct_cell_list = list(df_reg.index[df_reg[ct_col] == ct])\n df_res.loc[ct, [\"n_cell\", \"n_ctrl\"]] = [len(ct_cell_list), n_ctrl]\n # Association\n for ct in ct_list:\n ct_cell_list = list(df_reg.index[df_reg[ct_col] == ct])\n score_q95 = np.quantile(df_reg.loc[ct_cell_list, \"norm_score\"], 0.95)\n v_ctrl_score_q95 = np.quantile(\n df_reg.loc[ct_cell_list, control_list], 0.95, axis=0\n )\n mc_p = ((v_ctrl_score_q95 >= score_q95).sum() + 1) / (\n v_ctrl_score_q95.shape[0] + 1\n )\n mc_z = (score_q95 - v_ctrl_score_q95.mean()) / v_ctrl_score_q95.std()\n df_res.loc[ct, [\"assoc_mcp\", \"assoc_mcz\"]] = [mc_p, mc_z]\n # 
Heterogeneity\n # subset to common set of cells\n df_rls = md.test_gearysc(\n adata[cell_list], df_reg.loc[cell_list, :], groupby=ct_col\n )\n for ct in ct_list:\n mc_p, mc_z = df_rls.loc[ct, [\"pval\", \"zsc\"]]\n df_res.loc[ct, [\"hetero_mcp\", \"hetero_mcz\"]] = [mc_p, mc_z]\n\n df_res.to_csv(\n os.path.join(\n OUT_FOLDER, \"%s.scdrs_ct.%s\" % (trait, ct_col.replace(\" \", \"_\"))\n ),\n sep=\"\\t\",\n index=True,\n )\n print(\n \"%s: cell type-level analysis with label=%s (sys_time=%0.1fs)\"\n % (trait, ct_col, time.time() - sys_start_time)\n )\n\n # Variable-disease correlation\n if len(VARIABLE_LIST) > 0:\n col_list = [\"n_ctrl\", \"corr_mcp\", \"corr_mcz\"]\n df_res = pd.DataFrame(\n index=VARIABLE_LIST, columns=col_list, dtype=np.float32\n )\n for var_col in VARIABLE_LIST:\n corr_ = np.corrcoef(df_reg[var_col], df_reg[\"norm_score\"])[0, 1]\n v_corr_ = [\n np.corrcoef(df_reg[var_col], df_reg[\"ctrl_norm_score_%d\" % x])[0, 1]\n for x in np.arange(n_ctrl)\n ]\n v_corr_ = np.array(v_corr_)\n mc_p = ((v_corr_ >= corr_).sum() + 1) / (v_corr_.shape[0] + 1)\n mc_z = (corr_ - v_corr_.mean()) / v_corr_.std()\n df_res.loc[var_col] = [n_ctrl, mc_p, mc_z]\n df_res.to_csv(\n os.path.join(OUT_FOLDER, \"%s.scdrs_var\" % trait), sep=\"\\t\", index=True\n )\n print(\n \"%s: cell-level variable-disease correlation analysis (sys_time=%0.1fs)\"\n % (trait, time.time() - sys_start_time)\n )\n\n # Gene prioritization\n if FLAG_GENE is True:\n mat_expr = adata[df_reg.index].X.copy()\n v_corr = md._pearson_corr(mat_expr, df_reg[\"norm_score\"].values)\n df_res = pd.DataFrame(\n index=adata.var_names, columns=[\"CORR\", \"RANK\"], dtype=np.float32\n )\n df_res[\"CORR\"] = v_corr\n df_res.sort_values(\"CORR\", ascending=False, inplace=True)\n df_res[\"RANK\"] = np.arange(df_res.shape[0])\n df_res.to_csv(\n os.path.join(OUT_FOLDER, \"%s.scdrs_gene\" % trait), sep=\"\\t\", index=True\n )\n print(\n \"%s: disease gene prioritization (sys_time=%0.1fs)\"\n % (trait, time.time() - sys_start_time)\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"compute score\")\n\n parser.add_argument(\"--h5ad_file\", type=str, required=True)\n parser.add_argument(\n \"--score_file\",\n type=str,\n required=True,\n help=\"@.full_score.gz where @ denotes trait names\",\n )\n parser.add_argument(\n \"--cell_type\",\n type=str,\n required=False,\n default=None,\n help=\"Comma-seprated coloumn names for cell types/tissues, \"\n \"used for assessing cell type-disease association and \"\n \"within-cell type disease association heterogeneity\",\n )\n parser.add_argument(\n \"--cell_variable\",\n type=str,\n required=False,\n default=None,\n help=\"Comma-seprated coloumn names for cell-level variables, \"\n \"used for associating cell-level variables to disease scores\",\n )\n parser.add_argument(\n \"--flag_gene\",\n type=str,\n required=False,\n default=False,\n help=\"If True, perform gene prioritization\",\n )\n parser.add_argument(\n \"--flag_filter\",\n type=str,\n required=False,\n default=\"True\",\n help=\"If to apply cell and gene filters to the h5ad_file data\",\n )\n parser.add_argument(\n \"--flag_raw_count\",\n type=str,\n required=False,\n default=\"True\",\n help=\"If True, apply size factor normalization and log1p transformation\",\n )\n parser.add_argument(\n \"--out_folder\",\n type=str,\n required=True,\n help=\"Save file at out_folder/trait.scdrs_res\",\n )\n\n args = parser.parse_args()\n\n main(args)",
"import scdrs\nimport os\nimport subprocess\nimport pandas as pd\nimport numpy as np\nimport tempfile\nfrom .test_method_score_cell_main import compare_score_file\n\n\ndef test_score_cell_cli():\n \"\"\"\n Test CLI `scdrs compute-score`\n \"\"\"\n # Load toy data\n ROOT_DIR = scdrs.__path__[0]\n H5AD_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.h5ad\")\n COV_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.cov\")\n assert os.path.exists(H5AD_FILE), \"built-in data toydata_mouse.h5ad missing\"\n assert os.path.exists(COV_FILE), \"built-in data toydata_mouse.cov missing\"\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n dict_df_score = {}\n for gs_species in [\"human\", \"mouse\"]:\n gs_file = os.path.join(ROOT_DIR, f\"data/toydata_{gs_species}.gs\")\n # call compute_score.py\n cmds = [\n f\"scdrs compute-score\",\n f\"--h5ad_file {H5AD_FILE}\",\n \"--h5ad_species mouse\",\n f\"--gs_file {gs_file}\",\n f\"--gs_species {gs_species}\",\n f\"--cov_file {COV_FILE}\",\n \"--ctrl_match_opt mean_var\",\n \"--n_ctrl 20\",\n \"--flag_filter_data False\",\n \"--weight_opt vs\",\n \"--flag_raw_count False\",\n \"--flag_return_ctrl_raw_score False\",\n \"--flag_return_ctrl_norm_score False\",\n f\"--out_folder {tmp_dir_path}\",\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n dict_df_score[gs_species] = pd.read_csv(\n os.path.join(tmp_dir_path, f\"toydata_gs_{gs_species}.score.gz\"),\n sep=\"\\t\",\n index_col=0,\n )\n # consistency between human and mouse\n assert np.all(dict_df_score[\"mouse\"].pval == dict_df_score[\"human\"].pval)\n\n df_res = dict_df_score[\"mouse\"]\n\n REF_COV_FILE = os.path.join(\n ROOT_DIR, \"data/toydata_gs_mouse.ref_Ctrl20_CovConstCovariate.score.gz\"\n )\n df_ref_res = pd.read_csv(REF_COV_FILE, sep=\"\\t\", index_col=0)\n compare_score_file(df_res, df_ref_res)\n tmp_dir.cleanup()\n return\n\n\ndef test_munge_gs_cli():\n \"\"\"\n Test CLI `scdrs munge-gs`\n \"\"\"\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n\n # pval_file and zscore_file\n temp_df = pd.DataFrame(\n data={\n \"HEIGHT\": [0.02, np.nan, 0.4],\n \"BMI\": [0.8, 0.02, np.nan],\n }\n )\n temp_df.index = [\"OR4F5\", \"DAZ1\", \"BPY2B\"]\n temp_df.to_csv(os.path.join(tmp_dir_path, \"pval_file.tsv\"), sep=\"\\t\", index=True)\n temp_df = pd.DataFrame(\n data={\n \"GENE\": [\"OR4F5\", \"DAZ1\", \"BPY2B\"],\n \"HEIGHT\": [2.0537, np.nan, 0.25335],\n \"BMI\": [-0.84162, 2.0537, np.nan],\n }\n )\n temp_df.to_csv(os.path.join(tmp_dir_path, \"zscore_file.tsv\"), sep=\"\\t\", index=False)\n\n dict_df_score = {}\n for input_file in [\"pval_file\", \"zscore_file\"]:\n for selection in [\n \"--n-max 1\",\n \"--n-min 1 --n-max 3 --fdr 0.05\",\n \"--n-min 1 --n-max 3 --fwer 0.05\",\n ]:\n # Call scdrs munge-gs\n input_file_path = os.path.join(tmp_dir_path, \"%s.tsv\" % input_file)\n output_file_path = os.path.join(tmp_dir_path, f\"outfile.gs\")\n cmds = [\n \"scdrs munge-gs\",\n f\"--{input_file} {input_file_path}\",\n f\"--out-file {output_file_path}\",\n \"--weight zscore\",\n selection,\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n temp_df = pd.read_csv(\n os.path.join(tmp_dir_path, f\"outfile.gs\"),\n sep=\"\\t\",\n index_col=0,\n )\n\n # Check results\n print('Generated .gs file:')\n print(temp_df)\n err_msg = \"input_file=%s, %s\" % (input_file, selection)\n assert list(temp_df.index) == [\"BMI\", \"HEIGHT\"], err_msg\n assert temp_df.loc[\"BMI\", \"GENESET\"] == \"DAZ1:2.0537\", err_msg\n assert temp_df.loc[\"HEIGHT\", \"GENESET\"] == 
\"OR4F5:2.0537\", err_msg\n\n tmp_dir.cleanup()\n\n return\n\n\ndef test_downstream_cli():\n \"\"\"\n Test CLI `scdrs perform-downstream`\n\n 1. --group-analysis cell_type\n 2. --corr-analysis causal_variable,non_causal_variable,covariate\n 3. --gene-analysis\n \"\"\"\n\n # Load toy data\n ROOT_DIR = scdrs.__path__[0]\n H5AD_FILE = os.path.join(ROOT_DIR, \"data/toydata_mouse.h5ad\")\n SCORE_FILE = os.path.join(ROOT_DIR, \"data/@.full_score.gz\")\n REF_RES_DIR = os.path.join(ROOT_DIR, \"data/\")\n\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_dir_path = tmp_dir.name\n for task in [\n \"--group-analysis cell_type\",\n \"--corr-analysis causal_variable,non_causal_variable,covariate\",\n \"--gene-analysis\",\n ]:\n # Call scdrs downstream\n cmds = [\n f\"scdrs perform-downstream\",\n f\"--h5ad_file {H5AD_FILE}\",\n f\"--score-file {SCORE_FILE}\",\n task,\n \"--flag-filter-data False\",\n \"--flag-raw-count False\",\n \"--knn-n-neighbors 15\",\n \"--knn-n-pcs 20\",\n f\"--out-folder {tmp_dir_path}\",\n ]\n subprocess.check_call(\" \".join(cmds), shell=True)\n\n # Check consistency between computed results and reference results\n for prefix in [\"toydata_gs_mouse.ref_Ctrl20_CovConstCovariate\"]:\n for suffix in [\"scdrs_group.cell_type\", \"scdrs_gene\", \"scdrs_cell_corr\"]:\n res_path = os.path.join(tmp_dir_path, f\"{prefix}.{suffix}\")\n ref_res_path = os.path.join(REF_RES_DIR, f\"{prefix}.{suffix}\")\n df_res = pd.read_csv(res_path, sep=\"\\t\", index_col=0)\n df_ref_res = pd.read_csv(ref_res_path, sep=\"\\t\", index_col=0)\n print(df_res)\n assert np.allclose(\n df_res.values, df_ref_res.values\n ), '%s, %s'%(prefix, suffix)\n\n tmp_dir.cleanup()\n return"
] |
[
[
"pandas.read_csv",
"numpy.arange",
"numpy.quantile",
"pandas.DataFrame",
"numpy.corrcoef",
"numpy.array"
],
[
"numpy.all",
"pandas.read_csv",
"numpy.allclose",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
nofarm3/pandas
|
[
"963cf2b5abf4e1ee99a7f6b9031ad485804c5dff",
"c5b4272ed1e7d71266e06660ce9970527711fd55"
] |
[
"pandas/core/apply.py",
"pandas/core/frame.py"
] |
[
"from __future__ import annotations\n\nimport abc\nimport inspect\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Type, cast\n\nimport numpy as np\n\nfrom pandas._config import option_context\n\nfrom pandas._libs import lib\nfrom pandas._typing import (\n AggFuncType,\n AggFuncTypeBase,\n AggFuncTypeDict,\n Axis,\n FrameOrSeriesUnion,\n)\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.common import (\n is_dict_like,\n is_extension_array_dtype,\n is_list_like,\n is_sequence,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas.core.aggregation import agg_dict_like, agg_list_like\nfrom pandas.core.construction import (\n array as pd_array,\n create_series_with_explicit_dtype,\n)\n\nif TYPE_CHECKING:\n from pandas import DataFrame, Index, Series\n\nResType = Dict[int, Any]\n\n\ndef frame_apply(\n obj: DataFrame,\n func: AggFuncType,\n axis: Axis = 0,\n raw: bool = False,\n result_type: Optional[str] = None,\n args=None,\n kwds=None,\n) -> FrameApply:\n \"\"\" construct and return a row or column based frame apply object \"\"\"\n axis = obj._get_axis_number(axis)\n klass: Type[FrameApply]\n if axis == 0:\n klass = FrameRowApply\n elif axis == 1:\n klass = FrameColumnApply\n\n return klass(\n obj,\n func,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n\n\ndef series_apply(\n obj: Series,\n func: AggFuncType,\n convert_dtype: bool = True,\n args=None,\n kwds=None,\n) -> SeriesApply:\n return SeriesApply(\n obj,\n func,\n convert_dtype,\n args,\n kwds,\n )\n\n\nclass Apply(metaclass=abc.ABCMeta):\n axis: int\n\n def __init__(\n self,\n obj: FrameOrSeriesUnion,\n func,\n raw: bool,\n result_type: Optional[str],\n args,\n kwds,\n ):\n self.obj = obj\n self.raw = raw\n self.args = args or ()\n self.kwds = kwds or {}\n\n if result_type not in [None, \"reduce\", \"broadcast\", \"expand\"]:\n raise ValueError(\n \"invalid value for result_type, must be one \"\n \"of {None, 'reduce', 'broadcast', 'expand'}\"\n )\n\n self.result_type = result_type\n\n # curry if needed\n if (\n (kwds or args)\n and not isinstance(func, (np.ufunc, str))\n and not is_list_like(func)\n ):\n\n def f(x):\n return func(x, *args, **kwds)\n\n else:\n f = func\n\n self.f: AggFuncType = f\n\n @property\n def index(self) -> Index:\n return self.obj.index\n\n @abc.abstractmethod\n def apply(self) -> FrameOrSeriesUnion:\n pass\n\n def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:\n \"\"\"\n Provide an implementation for the aggregators.\n\n Returns\n -------\n tuple of result, how.\n\n Notes\n -----\n how can be a string describe the required post-processing, or\n None if not required.\n \"\"\"\n obj = self.obj\n arg = self.f\n args = self.args\n kwargs = self.kwds\n\n _axis = kwargs.pop(\"_axis\", None)\n if _axis is None:\n _axis = getattr(obj, \"axis\", 0)\n\n result = self.maybe_apply_str()\n if result is not None:\n return result, None\n\n if is_dict_like(arg):\n arg = cast(AggFuncTypeDict, arg)\n return agg_dict_like(obj, arg, _axis), True\n elif is_list_like(arg):\n # we require a list, but not a 'str'\n arg = cast(List[AggFuncTypeBase], arg)\n return agg_list_like(obj, arg, _axis=_axis), None\n else:\n result = None\n\n if callable(arg):\n f = obj._get_cython_func(arg)\n if f and not args and not kwargs:\n return getattr(obj, f)(), None\n\n # caller can react\n return result, True\n\n def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:\n \"\"\"\n Compute apply in case of a string.\n\n Returns\n 
-------\n result: Series, DataFrame, or None\n Result when self.f is a string, None otherwise.\n \"\"\"\n f = self.f\n if not isinstance(f, str):\n return None\n # Support for `frame.transform('method')`\n # Some methods (shift, etc.) require the axis argument, others\n # don't, so inspect and insert if necessary.\n func = getattr(self.obj, f, None)\n if callable(func):\n sig = inspect.getfullargspec(func)\n if \"axis\" in sig.args:\n self.kwds[\"axis\"] = self.axis\n return self.obj._try_aggregate_string_function(f, *self.args, **self.kwds)\n\n def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]:\n \"\"\"\n Compute apply in case of a list-like or dict-like.\n\n Returns\n -------\n result: Series, DataFrame, or None\n Result when self.f is a list-like or dict-like, None otherwise.\n \"\"\"\n # Note: dict-likes are list-like\n if not is_list_like(self.f):\n return None\n return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwds)\n\n\nclass FrameApply(Apply):\n obj: DataFrame\n\n # ---------------------------------------------------------------\n # Abstract Methods\n\n @property\n @abc.abstractmethod\n def result_index(self) -> Index:\n pass\n\n @property\n @abc.abstractmethod\n def result_columns(self) -> Index:\n pass\n\n @property\n @abc.abstractmethod\n def series_generator(self) -> Iterator[Series]:\n pass\n\n @abc.abstractmethod\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n ) -> FrameOrSeriesUnion:\n pass\n\n # ---------------------------------------------------------------\n\n @property\n def res_columns(self) -> Index:\n return self.result_columns\n\n @property\n def columns(self) -> Index:\n return self.obj.columns\n\n @cache_readonly\n def values(self):\n return self.obj.values\n\n @cache_readonly\n def dtypes(self) -> Series:\n return self.obj.dtypes\n\n @property\n def agg_axis(self) -> Index:\n return self.obj._get_agg_axis(self.axis)\n\n def apply(self) -> FrameOrSeriesUnion:\n \"\"\" compute the results \"\"\"\n # dispatch to agg\n result = self.maybe_apply_multiple()\n if result is not None:\n return result\n\n # all empty\n if len(self.columns) == 0 and len(self.index) == 0:\n return self.apply_empty_result()\n\n # string dispatch\n result = self.maybe_apply_str()\n if result is not None:\n return result\n\n # ufunc\n elif isinstance(self.f, np.ufunc):\n with np.errstate(all=\"ignore\"):\n results = self.obj._mgr.apply(\"apply\", func=self.f)\n # _constructor will retain self.index and self.columns\n return self.obj._constructor(data=results)\n\n # broadcasting\n if self.result_type == \"broadcast\":\n return self.apply_broadcast(self.obj)\n\n # one axis empty\n elif not all(self.obj.shape):\n return self.apply_empty_result()\n\n # raw\n elif self.raw:\n return self.apply_raw()\n\n return self.apply_standard()\n\n def apply_empty_result(self):\n \"\"\"\n we have an empty result; at least 1 axis is 0\n\n we will try to apply the function to an empty\n series in order to see if this is a reduction function\n \"\"\"\n assert callable(self.f)\n\n # we are not asked to reduce or infer reduction\n # so just return a copy of the existing object\n if self.result_type not in [\"reduce\", None]:\n return self.obj.copy()\n\n # we may need to infer\n should_reduce = self.result_type == \"reduce\"\n\n from pandas import Series\n\n if not should_reduce:\n try:\n r = self.f(Series([], dtype=np.float64))\n except Exception:\n pass\n else:\n should_reduce = not isinstance(r, Series)\n\n if should_reduce:\n if len(self.agg_axis):\n r = 
self.f(Series([], dtype=np.float64))\n else:\n r = np.nan\n\n return self.obj._constructor_sliced(r, index=self.agg_axis)\n else:\n return self.obj.copy()\n\n def apply_raw(self):\n \"\"\" apply to the values as a numpy array \"\"\"\n\n def wrap_function(func):\n \"\"\"\n Wrap user supplied function to work around numpy issue.\n\n see https://github.com/numpy/numpy/issues/8352\n \"\"\"\n\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n if isinstance(result, str):\n result = np.array(result, dtype=object)\n return result\n\n return wrapper\n\n result = np.apply_along_axis(wrap_function(self.f), self.axis, self.values)\n\n # TODO: mixed type case\n if result.ndim == 2:\n return self.obj._constructor(result, index=self.index, columns=self.columns)\n else:\n return self.obj._constructor_sliced(result, index=self.agg_axis)\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n assert callable(self.f)\n\n result_values = np.empty_like(target.values)\n\n # axis which we want to compare compliance\n result_compare = target.shape[0]\n\n for i, col in enumerate(target.columns):\n res = self.f(target[col])\n ares = np.asarray(res).ndim\n\n # must be a scalar or 1d\n if ares > 1:\n raise ValueError(\"too many dims to broadcast\")\n elif ares == 1:\n\n # must match return dim\n if result_compare != len(res):\n raise ValueError(\"cannot broadcast result\")\n\n result_values[:, i] = res\n\n # we *always* preserve the original index / columns\n result = self.obj._constructor(\n result_values, index=target.index, columns=target.columns\n )\n return result\n\n def apply_standard(self):\n results, res_index = self.apply_series_generator()\n\n # wrap results\n return self.wrap_results(results, res_index)\n\n def apply_series_generator(self) -> Tuple[ResType, Index]:\n assert callable(self.f)\n\n series_gen = self.series_generator\n res_index = self.result_index\n\n results = {}\n\n with option_context(\"mode.chained_assignment\", None):\n for i, v in enumerate(series_gen):\n # ignore SettingWithCopy here in case the user mutates\n results[i] = self.f(v)\n if isinstance(results[i], ABCSeries):\n # If we have a view on v, we need to make a copy because\n # series_generator will swap out the underlying data\n results[i] = results[i].copy(deep=False)\n\n return results, res_index\n\n def wrap_results(self, results: ResType, res_index: Index) -> FrameOrSeriesUnion:\n from pandas import Series\n\n # see if we can infer the results\n if len(results) > 0 and 0 in results and is_sequence(results[0]):\n return self.wrap_results_for_axis(results, res_index)\n\n # dict of scalars\n\n # the default dtype of an empty Series will be `object`, but this\n # code can be hit by df.mean() where the result should have dtype\n # float64 even if it's an empty Series.\n constructor_sliced = self.obj._constructor_sliced\n if constructor_sliced is Series:\n result = create_series_with_explicit_dtype(\n results, dtype_if_empty=np.float64\n )\n else:\n result = constructor_sliced(results)\n result.index = res_index\n\n return result\n\n\nclass FrameRowApply(FrameApply):\n axis = 0\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n return super().apply_broadcast(target)\n\n @property\n def series_generator(self):\n return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))\n\n @property\n def result_index(self) -> Index:\n return self.columns\n\n @property\n def result_columns(self) -> Index:\n return self.index\n\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n 
) -> FrameOrSeriesUnion:\n \"\"\" return the results for the rows \"\"\"\n\n if self.result_type == \"reduce\":\n # e.g. test_apply_dict GH#8735\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n\n elif self.result_type is None and all(\n isinstance(x, dict) for x in results.values()\n ):\n # Our operation was a to_dict op e.g.\n # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n\n try:\n result = self.obj._constructor(data=results)\n except ValueError as err:\n if \"All arrays must be of the same length\" in str(err):\n # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]\n # see test_agg_listlike_result GH#29587\n res = self.obj._constructor_sliced(results)\n res.index = res_index\n return res\n else:\n raise\n\n if not isinstance(results[0], ABCSeries):\n if len(result.index) == len(self.res_columns):\n result.index = self.res_columns\n\n if len(result.columns) == len(res_index):\n result.columns = res_index\n\n return result\n\n\nclass FrameColumnApply(FrameApply):\n axis = 1\n\n def apply_broadcast(self, target: DataFrame) -> DataFrame:\n result = super().apply_broadcast(target.T)\n return result.T\n\n @property\n def series_generator(self):\n values = self.values\n assert len(values) > 0\n\n # We create one Series object, and will swap out the data inside\n # of it. Kids: don't do this at home.\n ser = self.obj._ixs(0, axis=0)\n mgr = ser._mgr\n blk = mgr.blocks[0]\n\n if is_extension_array_dtype(blk.dtype):\n # values will be incorrect for this block\n # TODO(EA2D): special case would be unnecessary with 2D EAs\n obj = self.obj\n for i in range(len(obj)):\n yield obj._ixs(i, axis=0)\n\n else:\n for (arr, name) in zip(values, self.index):\n # GH#35462 re-pin mgr in case setitem changed it\n ser._mgr = mgr\n blk.values = arr\n ser.name = name\n yield ser\n\n @property\n def result_index(self) -> Index:\n return self.index\n\n @property\n def result_columns(self) -> Index:\n return self.columns\n\n def wrap_results_for_axis(\n self, results: ResType, res_index: Index\n ) -> FrameOrSeriesUnion:\n \"\"\" return the results for the columns \"\"\"\n result: FrameOrSeriesUnion\n\n # we have requested to expand\n if self.result_type == \"expand\":\n result = self.infer_to_same_shape(results, res_index)\n\n # we have a non-series and don't want inference\n elif not isinstance(results[0], ABCSeries):\n result = self.obj._constructor_sliced(results)\n result.index = res_index\n\n # we may want to infer results\n else:\n result = self.infer_to_same_shape(results, res_index)\n\n return result\n\n def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:\n \"\"\" infer the results to the same shape as the input object \"\"\"\n result = self.obj._constructor(data=results)\n result = result.T\n\n # set the index\n result.index = res_index\n\n # infer dtypes\n result = result.infer_objects()\n\n return result\n\n\nclass SeriesApply(Apply):\n obj: Series\n axis = 0\n\n def __init__(\n self,\n obj: Series,\n func: AggFuncType,\n convert_dtype: bool,\n args,\n kwds,\n ):\n self.convert_dtype = convert_dtype\n\n super().__init__(\n obj,\n func,\n raw=False,\n result_type=None,\n args=args,\n kwds=kwds,\n )\n\n def apply(self) -> FrameOrSeriesUnion:\n obj = self.obj\n\n if len(obj) == 0:\n return self.apply_empty_result()\n\n # dispatch to agg\n result = self.maybe_apply_multiple()\n if result is not None:\n return result\n\n # if we are a string, try to 
dispatch\n result = self.maybe_apply_str()\n if result is not None:\n return result\n\n return self.apply_standard()\n\n def apply_empty_result(self) -> Series:\n obj = self.obj\n return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(\n obj, method=\"apply\"\n )\n\n def apply_standard(self) -> FrameOrSeriesUnion:\n f = self.f\n obj = self.obj\n\n with np.errstate(all=\"ignore\"):\n if isinstance(f, np.ufunc):\n return f(obj)\n\n # row-wise access\n if is_extension_array_dtype(obj.dtype) and hasattr(obj._values, \"map\"):\n # GH#23179 some EAs do not have `map`\n mapped = obj._values.map(f)\n else:\n values = obj.astype(object)._values\n mapped = lib.map_infer(values, f, convert=self.convert_dtype)\n\n if len(mapped) and isinstance(mapped[0], ABCSeries):\n # GH 25959 use pd.array instead of tolist\n # so extension arrays can be used\n return obj._constructor_expanddim(pd_array(mapped), index=obj.index)\n else:\n return obj._constructor(mapped, index=obj.index).__finalize__(\n obj, method=\"apply\"\n )\n",
"\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nfrom collections import abc\nimport datetime\nfrom io import StringIO\nimport itertools\nimport mmap\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AnyStr,\n Dict,\n FrozenSet,\n Hashable,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import algos as libalgos, lib, properties\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n AnyArrayLike,\n ArrayLike,\n Axes,\n Axis,\n ColspaceArgType,\n CompressionOptions,\n Dtype,\n FilePathOrBuffer,\n FloatFormatType,\n FormattersType,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n IndexLabel,\n Level,\n Manager,\n PythonFuncType,\n Renamer,\n StorageOptions,\n Suffixes,\n ValueKeyFunc,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n deprecate_kwarg,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_axis_style_args,\n validate_bool_kwarg,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n construct_2d_arraylike_from_scalar,\n find_common_type,\n infer_dtype_from_scalar,\n invalidate_string_dtypes,\n maybe_box_datetimelike,\n maybe_convert_platform,\n maybe_downcast_to_dtype,\n maybe_infer_to_datetimelike,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n infer_dtype_from_object,\n is_bool_dtype,\n is_dataclass,\n is_datetime64_any_dtype,\n is_dict_like,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_sequence,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import isna, notna\n\nfrom pandas.core import algorithms, common as com, generic, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import reconstruct_func, relabel_result, transform\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.sparse import SparseFrameAccessor\nfrom pandas.core.construction import extract_array, sanitize_masked_array\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.indexes import base as ibase\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n PeriodIndex,\n ensure_index,\n ensure_index_from_sequences,\n)\nfrom pandas.core.indexes.multi import MultiIndex, maybe_droplevels\nfrom pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable\nfrom pandas.core.internals import ArrayManager, BlockManager\nfrom pandas.core.internals.construction import (\n arrays_to_mgr,\n dataclasses_to_dicts,\n init_dict,\n init_ndarray,\n masked_rec_array_to_mgr,\n mgr_to_mgr,\n nested_data_to_arrays,\n reorder_arrays,\n sanitize_index,\n to_arrays,\n treat_as_nested,\n)\nfrom 
pandas.core.reshape.melt import melt\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index, lexsort_indexer, nargsort\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.formats import console, format as fmt\nfrom pandas.io.formats.info import BaseInfo, DataFrameInfo\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes\n\n from pandas.core.groupby.generic import DataFrameGroupBy\n from pandas.core.resample import Resampler\n\n from pandas.io.formats.style import Styler\n\n# ---------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = {\n \"axes\": \"index, columns\",\n \"klass\": \"DataFrame\",\n \"axes_single_arg\": \"{0 or 'index', 1 or 'columns'}\",\n \"axis\": \"\"\"axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index': apply function to each column.\n If 1 or 'columns': apply function to each row.\"\"\",\n \"inplace\": \"\"\"\n inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\",\n \"optional_by\": \"\"\"\n by : str or list of str\n Name or list of names to sort by.\n\n - if `axis` is 0 or `'index'` then `by` may contain index\n levels and/or column labels.\n - if `axis` is 1 or `'columns'` then `by` may contain column\n levels and/or index labels.\"\"\",\n \"optional_labels\": \"\"\"labels : array-like, optional\n New labels / index to conform the axis specified by 'axis' to.\"\"\",\n \"optional_axis\": \"\"\"axis : int or str, optional\n Axis to target. Can be either the axis name ('index', 'columns')\n or number (0, 1).\"\"\",\n \"replace_iloc\": \"\"\"\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\"\"\",\n}\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame or named Series objects with a database-style join.\n\nThe join is done on columns or indexes. If joining columns on\ncolumns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes\non indexes or indexes on a column or columns, the index will be passed on.\nWhen performing a cross merge, no column specifications to merge on are\nallowed.\n\nParameters\n----------%s\nright : DataFrame or named Series\n Object to merge with.\nhow : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'\n Type of merge to be performed.\n\n * left: use only keys from left frame, similar to a SQL left outer join;\n preserve key order.\n * right: use only keys from right frame, similar to a SQL right outer join;\n preserve key order.\n * outer: use union of keys from both frames, similar to a SQL full outer\n join; sort keys lexicographically.\n * inner: use intersection of keys from both frames, similar to a SQL inner\n join; preserve the order of the left keys.\n * cross: creates the cartesian product from both frames, preserves the order\n of the left keys.\n\n .. versionadded:: 1.2.0\n\non : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If `on` is None and not merging on indexes then this defaults\n to the intersection of the columns in both DataFrames.\nleft_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame. 
Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\nright_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\nleft_index : bool, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels.\nright_index : bool, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\nsort : bool, default False\n Sort the join keys lexicographically in the result DataFrame. If False,\n the order of the join keys depends on the join type (how keyword).\nsuffixes : list-like, default is (\"_x\", \"_y\")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\ncopy : bool, default True\n If False, avoid copy if possible.\nindicator : bool or str, default False\n If True, adds a column to the output DataFrame called \"_merge\" with\n information on the source of each row. The column can be given a different\n name by providing a string argument. The column will have a Categorical\n type with the value of \"left_only\" for observations whose merge key only\n appears in the left DataFrame, \"right_only\" for observations\n whose merge key only appears in the right DataFrame, and \"both\"\n if the observation's merge key is found in both DataFrames.\n\nvalidate : str, optional\n If specified, checks if merge is of specified type.\n\n * \"one_to_one\" or \"1:1\": check if merge keys are unique in both\n left and right datasets.\n * \"one_to_many\" or \"1:m\": check if merge keys are unique in left\n dataset.\n * \"many_to_one\" or \"m:1\": check if merge keys are unique in right\n dataset.\n * \"many_to_many\" or \"m:m\": allowed, but does not result in checks.\n\nReturns\n-------\nDataFrame\n A DataFrame of the two merged objects.\n\nSee Also\n--------\nmerge_ordered : Merge with optional filling/interpolation.\nmerge_asof : Merge on nearest keys.\nDataFrame.join : Similar method using indices.\n\nNotes\n-----\nSupport for specifying index levels as the `on`, `left_on`, and\n`right_on` parameters was added in version 0.23.0\nSupport for merging named Series objects was added in version 0.24.0\n\nExamples\n--------\n>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [1, 2, 3, 5]})\n>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [5, 6, 7, 8]})\n>>> df1\n lkey value\n0 foo 1\n1 bar 2\n2 baz 3\n3 foo 5\n>>> df2\n rkey value\n0 foo 5\n1 bar 6\n2 baz 7\n3 foo 8\n\nMerge df1 and df2 on the lkey and rkey columns. The value columns have\nthe default suffixes, _x and _y, appended.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2 with specified left and right suffixes\nappended to any overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey',\n... 
suffixes=('_left', '_right'))\n lkey value_left rkey value_right\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2, but raise an exception if the DataFrames have\nany overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))\nTraceback (most recent call last):\n...\nValueError: columns overlap but no suffix specified:\n Index(['value'], dtype='object')\n\n>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n>>> df1\n a b\n0 foo 1\n1 bar 2\n>>> df2\n a c\n0 foo 3\n1 baz 4\n\n>>> df1.merge(df2, how='inner', on='a')\n a b c\n0 foo 1 3\n\n>>> df1.merge(df2, how='left', on='a')\n a b c\n0 foo 1 3.0\n1 bar 2 NaN\n\n>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})\n>>> df2 = pd.DataFrame({'right': [7, 8]})\n>>> df1\n left\n0 foo\n1 bar\n>>> df2\n right\n0 7\n1 8\n\n>>> df1.merge(df2, how='cross')\n left right\n0 foo 7\n1 foo 8\n2 bar 7\n3 bar 8\n\"\"\"\n\n\n# -----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame, OpsMixin):\n \"\"\"\n Two-dimensional, size-mutable, potentially heterogeneous tabular data.\n\n Data structure also contains labeled axes (rows and columns).\n Arithmetic operations align on both row and column labels. Can be\n thought of as a dict-like container for Series objects. The primary\n pandas data structure.\n\n Parameters\n ----------\n data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame\n Dict can contain Series, arrays, constants, dataclass or list-like objects. If\n data is a dict, column order follows insertion-order.\n\n .. versionchanged:: 0.25.0\n If data is a list of dicts, column order follows insertion-order.\n\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer.\n copy : bool, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input.\n\n See Also\n --------\n DataFrame.from_records : Constructor from tuples, also record arrays.\n DataFrame.from_dict : From dicts of Series, arrays, or dicts.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n read_table : Read general delimited file into DataFrame.\n read_clipboard : Read text from clipboard into DataFrame.\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = pd.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n ... 
columns=['a', 'b', 'c'])\n >>> df2\n a b c\n 0 1 2 3\n 1 4 5 6\n 2 7 8 9\n\n Constructing DataFrame from dataclass:\n\n >>> from dataclasses import make_dataclass\n >>> Point = make_dataclass(\"Point\", [(\"x\", int), (\"y\", int)])\n >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])\n x y\n 0 0 0\n 1 0 3\n 2 2 3\n \"\"\"\n\n _internal_names_set = {\"columns\", \"index\"} | NDFrame._internal_names_set\n _typ = \"dataframe\"\n _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)\n\n @property\n def _constructor(self) -> Type[DataFrame]:\n return DataFrame\n\n _constructor_sliced: Type[Series] = Series\n _hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])\n _accessors: Set[str] = {\"sparse\"}\n\n @property\n def _constructor_expanddim(self):\n # GH#31549 raising NotImplementedError on a property causes trouble\n # for `inspect`\n def constructor(*args, **kwargs):\n raise NotImplementedError(\"Not supported for DataFrames!\")\n\n return constructor\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index: Optional[Axes] = None,\n columns: Optional[Axes] = None,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n ):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._mgr\n\n if isinstance(data, (BlockManager, ArrayManager)):\n if index is None and columns is None and dtype is None and copy is False:\n # GH#33357 fastpath\n NDFrame.__init__(self, data)\n return\n\n mgr = self._init_mgr(\n data, axes={\"index\": index, \"columns\": columns}, dtype=dtype, copy=copy\n )\n\n elif isinstance(data, dict):\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)\n\n # a masked array\n else:\n data = sanitize_masked_array(data)\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = {k: data[k] for k in data_columns}\n if columns is None:\n columns = data_columns\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, \"name\", None) is not None:\n mgr = init_dict({data.name: data}, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n # For data is list-like, or Iterable (will consume into list)\n elif is_list_like(data):\n if not isinstance(data, (abc.Sequence, ExtensionArray)):\n data = list(data)\n if len(data) > 0:\n if is_dataclass(data[0]):\n data = dataclasses_to_dicts(data)\n if treat_as_nested(data):\n arrays, columns, index = nested_data_to_arrays(\n data, columns, index, dtype\n )\n mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n else:\n mgr = init_dict({}, index, columns, dtype=dtype)\n # For data is scalar\n else:\n if index is None or columns is None:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n if not dtype:\n dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)\n\n # For data is a scalar extension dtype\n if is_extension_array_dtype(dtype):\n # TODO(EA2D): special case not needed with 2D EAs\n\n values = [\n 
construct_1d_arraylike_from_scalar(data, len(index), dtype)\n for _ in range(len(columns))\n ]\n mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)\n else:\n values = construct_2d_arraylike_from_scalar(\n data, len(index), len(columns), dtype, copy\n )\n\n mgr = init_ndarray(\n values, index, columns, dtype=values.dtype, copy=False\n )\n\n # ensure correct Manager type according to settings\n manager = get_option(\"mode.data_manager\")\n mgr = mgr_to_mgr(mgr, typ=manager)\n\n NDFrame.__init__(self, mgr)\n\n def _as_manager(self, typ: str) -> DataFrame:\n \"\"\"\n Private helper function to create a DataFrame with specific manager.\n\n Parameters\n ----------\n typ : {\"block\", \"array\"}\n\n Returns\n -------\n DataFrame\n New DataFrame using specified manager type. Is not guaranteed\n to be a copy or not.\n \"\"\"\n new_mgr: Manager\n new_mgr = mgr_to_mgr(self._mgr, typ=typ)\n # fastpath of passing a manager doesn't check the option/manager class\n return DataFrame(new_mgr)\n\n # ----------------------------------------------------------------------\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],\n dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n See Also\n --------\n ndarray.shape : Tuple of array dimensions.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self.index), len(self.columns)\n\n @property\n def _is_homogeneous_type(self) -> bool:\n \"\"\"\n Whether all the columns in a DataFrame have the same type.\n\n Returns\n -------\n bool\n\n See Also\n --------\n Index._is_homogeneous_type : Whether the object has a single\n dtype.\n MultiIndex._is_homogeneous_type : Whether all the levels of a\n MultiIndex have the same dtype.\n\n Examples\n --------\n >>> DataFrame({\"A\": [1, 2], \"B\": [3, 4]})._is_homogeneous_type\n True\n >>> DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.0]})._is_homogeneous_type\n False\n\n Items with the same type but different sizes are considered\n different types.\n\n >>> DataFrame({\n ... \"A\": np.array([1, 2], dtype=np.int32),\n ... 
\"B\": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type\n False\n \"\"\"\n if isinstance(self._mgr, ArrayManager):\n return len({arr.dtype for arr in self._mgr.arrays}) == 1\n if self._mgr.any_extension_types:\n return len({block.dtype for block in self._mgr.blocks}) == 1\n else:\n return not self._is_mixed_type\n\n @property\n def _can_fast_transpose(self) -> bool:\n \"\"\"\n Can we transpose this DataFrame without creating any new array objects.\n \"\"\"\n if isinstance(self._mgr, ArrayManager):\n return False\n if self._mgr.any_extension_types:\n # TODO(EA2D) special case would be unnecessary with 2D EAs\n return False\n return len(self._mgr.blocks) == 1\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def _repr_fits_vertical_(self) -> bool:\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns.\n\n In case of non-interactive session, no boundaries apply.\n\n `ignore_width` is here so ipynb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n width, height = console.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if (max_columns and nb_columns > max_columns) or (\n (not ignore_width) and width and nb_columns > (width // 2)\n ):\n return False\n\n # used by repr_html under IPython notebook or scripts ignore terminal\n # dims\n if ignore_width or not console.in_interactive_session():\n return True\n\n if get_option(\"display.width\") is not None or console.in_ipython_frontend():\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actually checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[: min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max(len(line) for line in value.split(\"\\n\"))\n\n return repr_width < width\n\n def _info_repr(self) -> bool:\n \"\"\"\n True if the repr should show the info view.\n \"\"\"\n info_repr_option = get_option(\"display.large_repr\") == \"info\"\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular DataFrame.\n \"\"\"\n buf = StringIO(\"\")\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n max_colwidth = get_option(\"display.max_colwidth\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = console.get_console_size()\n else:\n width = None\n self.to_string(\n buf=buf,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n line_width=width,\n 
max_colwidth=max_colwidth,\n show_dimensions=show_dimensions,\n )\n\n return buf.getvalue()\n\n def _repr_html_(self) -> Optional[str]:\n \"\"\"\n Return a html representation for a particular DataFrame.\n\n Mainly for IPython notebook.\n \"\"\"\n if self._info_repr():\n buf = StringIO(\"\")\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace(\"<\", r\"&lt;\", 1)\n val = val.replace(\">\", r\"&gt;\", 1)\n return \"<pre>\" + val + \"</pre>\"\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=None,\n col_space=None,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n justify=None,\n index_names=True,\n header=True,\n index=True,\n bold_rows=True,\n escape=True,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=\".\",\n )\n return fmt.DataFrameRenderer(formatter).to_html(notebook=True)\n else:\n return None\n\n @Substitution(\n header_type=\"bool or sequence\",\n header=\"Write out the column names. If a list of strings \"\n \"is given, it is assumed to be aliases for the \"\n \"column names\",\n col_space_type=\"int, list or dict of int\",\n col_space=\"The minimum width of each column\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_string(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[int] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[fmt.FormattersType] = None,\n float_format: Optional[fmt.FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n min_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n max_colwidth: Optional[int] = None,\n encoding: Optional[str] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n %(shared_params)s\n line_width : int, optional\n Width to wrap a line in characters.\n max_colwidth : int, optional\n Max width to truncate each column in characters. By default, no limit.\n\n .. versionadded:: 1.0.0\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. 
versionadded:: 1.0\n %(returns)s\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}\n >>> df = pd.DataFrame(d)\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n from pandas import option_context\n\n with option_context(\"display.max_colwidth\", max_colwidth):\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header,\n index=index,\n min_rows=min_rows,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=decimal,\n )\n return fmt.DataFrameRenderer(formatter).to_string(\n buf=buf,\n encoding=encoding,\n line_width=line_width,\n )\n\n # ----------------------------------------------------------------------\n\n @property\n def style(self) -> Styler:\n \"\"\"\n Returns a Styler object.\n\n Contains methods for building a styled HTML representation of the DataFrame.\n\n See Also\n --------\n io.formats.style.Styler : Helps style a DataFrame or Series according to the\n data with HTML and CSS.\n \"\"\"\n from pandas.io.formats.style import Styler\n\n return Styler(self)\n\n _shared_docs[\n \"items\"\n ] = r\"\"\"\n Iterate over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Yields\n ------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as\n (index, Series) pairs.\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples\n of the values.\n\n Examples\n --------\n >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n >>> for label, content in df.items():\n ... print(f'label: {label}')\n ... print(f'content: {content}', sep='\\n')\n ...\n label: species\n content:\n panda bear\n polar bear\n koala marsupial\n Name: species, dtype: object\n label: population\n content:\n panda 1864\n polar 22000\n koala 80000\n Name: population, dtype: int64\n \"\"\"\n\n @Appender(_shared_docs[\"items\"])\n def items(self) -> Iterable[Tuple[Hashable, Series]]:\n if self.columns.is_unique and hasattr(self, \"_item_cache\"):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self._ixs(i, axis=1)\n\n @Appender(_shared_docs[\"items\"])\n def iteritems(self) -> Iterable[Tuple[Hashable, Series]]:\n yield from self.items()\n\n def iterrows(self) -> Iterable[Tuple[Hashable, Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : Series\n The data of the row as a Series.\n\n See Also\n --------\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). 
For example,\n\n >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n columns = self.columns\n klass = self._constructor_sliced\n for k, v in zip(self.index, self.values):\n s = klass(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index: bool = True, name: Optional[str] = \"Pandas\"):\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"Pandas\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> for row in df.itertuples():\n ... print(row)\n ...\n Pandas(Index='dog', num_legs=4, num_wings=0)\n Pandas(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n Pandas(num_legs=4, num_wings=0)\n Pandas(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... 
print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n arrays = []\n fields = list(self.columns)\n if index:\n arrays.append(self.index)\n fields.insert(0, \"Index\")\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n\n if name is not None:\n # https://github.com/python/mypy/issues/9046\n # error: namedtuple() expects a string literal as the first argument\n itertuple = collections.namedtuple( # type: ignore[misc]\n name, fields, rename=True\n )\n return map(itertuple._make, zip(*arrays))\n\n # fallback to regular tuples\n return zip(*arrays)\n\n def __len__(self) -> int:\n \"\"\"\n Returns length of info axis, but here we use the index.\n \"\"\"\n return len(self.index)\n\n # pandas/core/frame.py:1146: error: Overloaded function signatures 1 and 2\n # overlap with incompatible return types [misc]\n @overload\n def dot(self, other: Series) -> Series: # type: ignore[misc]\n ...\n\n @overload\n def dot(self, other: Union[DataFrame, Index, ArrayLike]) -> DataFrame:\n ...\n\n def dot(self, other: Union[AnyArrayLike, FrameOrSeriesUnion]) -> FrameOrSeriesUnion:\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series, DataFrame or a numpy array.\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series or DataFrame\n If other is a Series, return the matrix product between self and\n other as a Series. If other is a DataFrame or a numpy.array, return\n the matrix product of self and other in a DataFrame of a np.array.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. 
In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n Here we multiply a DataFrame with a Series.\n\n >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> s = pd.Series([1, 1, 2, 1])\n >>> df.dot(s)\n 0 -4\n 1 5\n dtype: int64\n\n Here we multiply a DataFrame with another DataFrame.\n\n >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(other)\n 0 1\n 0 1 4\n 1 2 2\n\n Note that the dot method give the same result as @\n\n >>> df @ other\n 0 1\n 0 1 4\n 1 2 2\n\n The dot method works also if other is an np.array.\n\n >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(arr)\n 0 1\n 0 1 4\n 1 2 2\n\n Note how shuffling of the objects does not change the result.\n\n >>> s2 = s.reindex([1, 0, 2, 3])\n >>> df.dot(s2)\n 0 -4\n 1 5\n dtype: int64\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if len(common) > len(self.columns) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right._values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, DataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=left.index, columns=other.columns\n )\n elif isinstance(other, Series):\n return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, (np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return self._constructor_sliced(result, index=left.index)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n @overload\n def __matmul__(self, other: Series) -> Series:\n ...\n\n @overload\n def __matmul__(\n self, other: Union[AnyArrayLike, FrameOrSeriesUnion]\n ) -> FrameOrSeriesUnion:\n ...\n\n def __matmul__(\n self, other: Union[AnyArrayLike, FrameOrSeriesUnion]\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n try:\n return self.T.dot(np.transpose(other)).T\n except ValueError as err:\n if \"shape mismatch\" not in str(err):\n raise\n # GH#21581 give exception message for original shapes\n msg = f\"shapes {np.shape(other)} and {self.shape} not aligned\"\n raise ValueError(msg) from err\n\n # ----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient=\"columns\", dtype=None, columns=None) -> DataFrame:\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. 
If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data, orient='index')\n 0 1 2 3\n row_1 3 2 1 0\n row_2 a b c d\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> pd.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D'])\n A B C D\n row_1 3 2 1 0\n row_2 a b c d\n \"\"\"\n index = None\n orient = orient.lower()\n if orient == \"index\":\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient == \"columns\":\n if columns is not None:\n raise ValueError(\"cannot use columns parameter with orient='columns'\")\n else: # pragma: no cover\n raise ValueError(\"only recognize index or columns for orient\")\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n def to_numpy(\n self, dtype=None, copy: bool = False, na_value=lib.no_default\n ) -> np.ndarray:\n \"\"\"\n Convert the DataFrame to a NumPy array.\n\n .. versionadded:: 0.24.0\n\n By default, the dtype of the returned array will be the common NumPy\n dtype of all types in the DataFrame. For example, if the dtypes are\n ``float16`` and ``float32``, the results dtype will be ``float32``.\n This may require copying data and coercing values, which may be\n expensive.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the dtypes of the DataFrame columns.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.to_numpy : Similar method for Series.\n\n Examples\n --------\n >>> pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to\n be used.\n\n >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5]})\n >>> df.to_numpy()\n array([[1. , 3. ],\n [2. 
, 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will\n have object dtype.\n\n >>> df['C'] = pd.date_range('2000', periods=2)\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],\n [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n result = self._mgr.as_array(\n transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value\n )\n if result.dtype is not dtype:\n result = np.array(result, dtype=dtype, copy=False)\n\n return result\n\n def to_dict(self, orient: str = \"dict\", into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n See Also\n --------\n DataFrame.from_dict: Create a DataFrame from a dictionary.\n DataFrame.to_json: Convert a DataFrame to JSON format.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... 
index=['row1', 'row2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df.to_dict()\n {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}\n\n You can specify the return orientation.\n\n >>> df.to_dict('series')\n {'col1': row1 1\n row2 2\n Name: col1, dtype: int64,\n 'col2': row1 0.50\n row2 0.75\n Name: col2, dtype: float64}\n\n >>> df.to_dict('split')\n {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],\n 'data': [[1, 0.5], [2, 0.75]]}\n\n >>> df.to_dict('records')\n [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]\n\n >>> df.to_dict('index')\n {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),\n ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd)\n [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),\n defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\n \"DataFrame columns are not unique, some columns will be omitted.\",\n UserWarning,\n stacklevel=2,\n )\n # GH16122\n into_c = com.standardize_mapping(into)\n\n orient = orient.lower()\n # GH32515\n if orient.startswith((\"d\", \"l\", \"s\", \"r\", \"i\")) and orient not in {\n \"dict\",\n \"list\",\n \"series\",\n \"split\",\n \"records\",\n \"index\",\n }:\n warnings.warn(\n \"Using short name for 'orient' is deprecated. Only the \"\n \"options: ('dict', list, 'series', 'split', 'records', 'index') \"\n \"will be used in a future version. Use one of the above \"\n \"to silence this warning.\",\n FutureWarning,\n )\n\n if orient.startswith(\"d\"):\n orient = \"dict\"\n elif orient.startswith(\"l\"):\n orient = \"list\"\n elif orient.startswith(\"sp\"):\n orient = \"split\"\n elif orient.startswith(\"s\"):\n orient = \"series\"\n elif orient.startswith(\"r\"):\n orient = \"records\"\n elif orient.startswith(\"i\"):\n orient = \"index\"\n\n if orient == \"dict\":\n return into_c((k, v.to_dict(into)) for k, v in self.items())\n\n elif orient == \"list\":\n return into_c((k, v.tolist()) for k, v in self.items())\n\n elif orient == \"split\":\n return into_c(\n (\n (\"index\", self.index.tolist()),\n (\"columns\", self.columns.tolist()),\n (\n \"data\",\n [\n list(map(maybe_box_datetimelike, t))\n for t in self.itertuples(index=False, name=None)\n ],\n ),\n )\n )\n\n elif orient == \"series\":\n return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())\n\n elif orient == \"records\":\n columns = self.columns.tolist()\n rows = (\n dict(zip(columns, row))\n for row in self.itertuples(index=False, name=None)\n )\n return [\n into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())\n for row in rows\n ]\n\n elif orient == \"index\":\n if not self.index.is_unique:\n raise ValueError(\"DataFrame index must be unique for orient='index'.\")\n return into_c(\n (t[0], dict(zip(self.columns, t[1:])))\n for t in self.itertuples(name=None)\n )\n\n else:\n raise ValueError(f\"orient '{orient}' not understood\")\n\n def to_gbq(\n self,\n destination_table: str,\n project_id: Optional[str] = None,\n chunksize: Optional[int] = None,\n reauth: bool = False,\n if_exists: str = \"fail\",\n auth_local_webserver: bool = False,\n table_schema: Optional[List[Dict[str, str]]] = None,\n location: 
Optional[str] = None,\n progress_bar: bool = True,\n credentials=None,\n ) -> None:\n \"\"\"\n Write a DataFrame to a Google BigQuery table.\n\n This function requires the `pandas-gbq package\n <https://pandas-gbq.readthedocs.io>`__.\n\n See the `How to authenticate with Google BigQuery\n <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__\n guide for authentication instructions.\n\n Parameters\n ----------\n destination_table : str\n Name of table to be written, in the form ``dataset.tablename``.\n project_id : str, optional\n Google BigQuery Account project ID. Optional when available from\n the environment.\n chunksize : int, optional\n Number of rows to be inserted in each chunk from the dataframe.\n Set to ``None`` to load the whole dataframe at once.\n reauth : bool, default False\n Force Google BigQuery to re-authenticate the user. This is useful\n if multiple accounts are used.\n if_exists : str, default 'fail'\n Behavior when the destination table exists. Value can be one of:\n\n ``'fail'``\n If table exists raise pandas_gbq.gbq.TableCreationError.\n ``'replace'``\n If table exists, drop it, recreate it, and insert data.\n ``'append'``\n If table exists, insert data. Create if does not exist.\n auth_local_webserver : bool, default False\n Use the `local webserver flow`_ instead of the `console flow`_\n when getting user credentials.\n\n .. _local webserver flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server\n .. _console flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console\n\n *New in version 0.2.0 of pandas-gbq*.\n table_schema : list of dicts, optional\n List of BigQuery table fields to which according DataFrame\n columns conform to, e.g. ``[{'name': 'col1', 'type':\n 'STRING'},...]``. If schema is not provided, it will be\n generated according to dtypes of DataFrame columns. See\n BigQuery API documentation on available names of a field.\n\n *New in version 0.3.1 of pandas-gbq*.\n location : str, optional\n Location where the load job should run. See the `BigQuery locations\n documentation\n <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a\n list of available locations. The location must match that of the\n target dataset.\n\n *New in version 0.5.0 of pandas-gbq*.\n progress_bar : bool, default True\n Use the library `tqdm` to show the progress bar for the upload,\n chunk by chunk.\n\n *New in version 0.5.0 of pandas-gbq*.\n credentials : google.auth.credentials.Credentials, optional\n Credentials for accessing Google APIs. Use this parameter to\n override default credentials, such as to use Compute Engine\n :class:`google.auth.compute_engine.Credentials` or Service\n Account :class:`google.oauth2.service_account.Credentials`\n directly.\n\n *New in version 0.8.0 of pandas-gbq*.\n\n .. 
versionadded:: 0.24.0\n\n See Also\n --------\n pandas_gbq.to_gbq : This function in the pandas-gbq library.\n read_gbq : Read a DataFrame from Google BigQuery.\n \"\"\"\n from pandas.io import gbq\n\n gbq.to_gbq(\n self,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n\n @classmethod\n def from_records(\n cls,\n data,\n index=None,\n exclude=None,\n columns=None,\n coerce_float: bool = False,\n nrows=None,\n ) -> DataFrame:\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Creates a DataFrame object from a structured ndarray, sequence of\n tuples or dicts, or DataFrame.\n\n Parameters\n ----------\n data : structured ndarray, sequence of tuples or dicts, or DataFrame\n Structured input data.\n index : str, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use.\n exclude : sequence, default None\n Columns or fields to exclude.\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns).\n coerce_float : bool, default False\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n nrows : int, default None\n Number of rows to read if data is an iterator.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_dict : DataFrame from dict of array-like or dicts.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n Data can be provided as a structured ndarray:\n\n >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],\n ... dtype=[('col_1', 'i4'), ('col_2', 'U1')])\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of dicts:\n\n >>> data = [{'col_1': 3, 'col_2': 'a'},\n ... {'col_1': 2, 'col_2': 'b'},\n ... {'col_1': 1, 'col_2': 'c'},\n ... 
{'col_1': 0, 'col_2': 'd'}]\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of tuples with corresponding columns:\n\n >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]\n >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = ensure_index(columns)\n\n if is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, \"dtype\") and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns_list = []\n for k, v in data.items():\n if k in columns:\n arr_columns_list.append(k)\n arrays.append(v)\n\n arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = to_arrays(data, columns)\n if columns is not None:\n columns = ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = to_arrays(data, columns)\n if coerce_float:\n for i, arr in enumerate(arrays):\n if arr.dtype == object:\n arrays[i] = lib.maybe_convert_objects(arr, try_float=True)\n\n arr_columns = ensure_index(arr_columns)\n if columns is not None:\n columns = ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if isinstance(index, str) or not hasattr(index, \"__iter__\"):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n index_data = [arrays[arr_columns.get_loc(field)] for field in index]\n except (KeyError, TypeError):\n # raised by get_loc, see GH#29258\n result_index = index\n else:\n result_index = ensure_index_from_sequences(index_data, names=index)\n exclude.update(index)\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)\n\n return cls(mgr)\n\n def to_records(\n self, index=True, column_dtypes=None, index_dtypes=None\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n .. versionadded:: 0.24.0\n\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n .. 
versionadded:: 0.24.0\n\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n If the DataFrame index has no label then the recarray field name\n is set to 'index'. If the index has a label then this is used as the\n field name:\n\n >>> df.index = df.index.rename(\"I\")\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False)\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"})\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n As well as for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\")\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])\n\n >>> index_dtypes = f\"<S{df.index.str.len().max()}\"\n >>> df.to_records(index_dtypes=index_dtypes)\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n if index:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. 
copy copy copy\n ix_vals = list(map(np.array, zip(*self.index._values)))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [\n np.asarray(self.iloc[:, i]) for i in range(len(self.columns))\n ]\n\n count = 0\n index_names = list(self.index.names)\n\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = f\"level_{count}\"\n count += 1\n elif index_names[0] is None:\n index_names = [\"index\"]\n\n names = [str(name) for name in itertools.chain(index_names, self.columns)]\n else:\n arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]\n names = [str(c) for c in self.columns]\n index_names = []\n\n index_len = len(index_names)\n formats = []\n\n for i, v in enumerate(arrays):\n index = i\n\n # When the names and arrays are collected, we\n # first collect those in the DataFrame's index,\n # followed by those in its columns.\n #\n # Thus, the total length of the array is:\n # len(index_names) + len(DataFrame.columns).\n #\n # This check allows us to see whether we are\n # handling a name / array in the index or column.\n if index < index_len:\n dtype_mapping = index_dtypes\n name = index_names[index]\n else:\n index -= index_len\n dtype_mapping = column_dtypes\n name = self.columns[index]\n\n # We have a dictionary, so we get the data type\n # associated with the index or column (which can\n # be denoted by its name in the DataFrame or its\n # position in DataFrame's array of indices or\n # columns, whichever is applicable.\n if is_dict_like(dtype_mapping):\n if name in dtype_mapping:\n dtype_mapping = dtype_mapping[name]\n elif index in dtype_mapping:\n dtype_mapping = dtype_mapping[index]\n else:\n dtype_mapping = None\n\n # If no mapping can be found, use the array's\n # dtype attribute for formatting.\n #\n # A valid dtype must either be a type or\n # string naming a type.\n if dtype_mapping is None:\n formats.append(v.dtype)\n elif isinstance(dtype_mapping, (type, np.dtype, str)):\n formats.append(dtype_mapping)\n else:\n element = \"row\" if i < index_len else \"column\"\n msg = f\"Invalid dtype {dtype_mapping} specified for {element} {name}\"\n raise ValueError(msg)\n\n return np.rec.fromarrays(arrays, dtype={\"names\": names, \"formats\": formats})\n\n @classmethod\n def _from_arrays(\n cls,\n arrays,\n columns,\n index,\n dtype: Optional[Dtype] = None,\n verify_integrity: bool = True,\n ) -> DataFrame:\n \"\"\"\n Create DataFrame from a list of arrays corresponding to the columns.\n\n Parameters\n ----------\n arrays : list-like of arrays\n Each array in the list corresponds to one column, in order.\n columns : list-like, Index\n The column names for the resulting DataFrame.\n index : list-like, Index\n The rows labels for the resulting DataFrame.\n dtype : dtype, optional\n Optional dtype to enforce for all arrays.\n verify_integrity : bool, default True\n Validate and homogenize all input. 
If set to False, it is assumed\n that all elements of `arrays` are actual arrays how they will be\n stored in a block (numpy ndarray or ExtensionArray), have the same\n length as and are aligned with the index, and that `columns` and\n `index` are ensured to be an Index object.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n mgr = arrays_to_mgr(\n arrays,\n columns,\n index,\n columns,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n return cls(mgr)\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_stata(\n self,\n path: FilePathOrBuffer,\n convert_dates: Optional[Dict[Hashable, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Hashable, str]] = None,\n version: Optional[int] = 114,\n convert_strl: Optional[Sequence[Hashable]] = None,\n compression: CompressionOptions = \"infer\",\n storage_options: StorageOptions = None,\n ) -> None:\n \"\"\"\n Export DataFrame object to Stata dta format.\n\n Writes the DataFrame to a Stata dataset file.\n \"dta\" files contain a Stata dataset.\n\n Parameters\n ----------\n path : str, buffer or path object\n String, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() function. If using a buffer\n then the buffer will not be automatically closed after the file\n data has been written.\n\n .. versionchanged:: 1.0.0\n\n Previously this was \"fname\"\n\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata\n internal format to use when writing the dates. Options are 'tc',\n 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer\n or a name. Datetime columns that do not have a conversion type\n specified will be converted to 'tc'. Raises NotImplementedError if\n a datetime column has timezone information.\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`.\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current\n time.\n data_label : str, optional\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as\n values. Each label must be 80 characters or smaller.\n version : {{114, 117, 118, 119, None}}, default 114\n Version to use in the output dta file. Set to None to let pandas\n decide between 118 or 119 formats depending on the number of\n columns in the frame. Version 114 can be read by Stata 10 and\n later. Version 117 can be read by Stata 13 or later. Version 118\n is supported in Stata 14 and later. Version 119 is supported in\n Stata 15 and later. Version 114 limits string variables to 244\n characters or fewer while versions 117 and later allow strings\n with lengths up to 2,000,000 characters. Versions 118 and 119\n support Unicode characters, and version 119 supports more than\n 32,767 variables.\n\n Version 119 should usually only be used when the number of\n variables exceeds the capacity of dta format 118. Exporting\n smaller datasets in format 119 may have unintended consequences,\n and, as of November 2020, Stata SE cannot read version 119 files.\n\n .. 
versionchanged:: 1.0.0\n\n Added support for formats 118 and 119.\n\n convert_strl : list, optional\n List of column names to convert to string columns to Stata StrL\n format. Only available if version is 117. Storing strings in the\n StrL format can produce smaller dta files if strings have more than\n 8 characters and values are repeated.\n compression : str or dict, default 'infer'\n For on-the-fly compression of the output dta. If string, specifies\n compression mode. If dict, value at key 'method' specifies\n compression mode. Compression mode must be one of {{'infer', 'gzip',\n 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and\n `fname` is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n compression). If dict and compression mode is one of {{'zip',\n 'gzip', 'bz2'}}, or inferred as one of the above, other entries\n passed as additional compression options.\n\n .. versionadded:: 1.1.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n * Column dtype is not representable in Stata\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n See Also\n --------\n read_stata : Import Stata data files.\n io.stata.StataWriter : Low-level writer for Stata data files.\n io.stata.StataWriter117 : Low-level writer for version 117 files.\n\n Examples\n --------\n >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}})\n >>> df.to_stata('animals.dta') # doctest: +SKIP\n \"\"\"\n if version not in (114, 117, 118, 119, None):\n raise ValueError(\"Only formats 114, 117, 118 and 119 are supported.\")\n if version == 114:\n if convert_strl is not None:\n raise ValueError(\"strl is not supported in format 114\")\n from pandas.io.stata import StataWriter as statawriter\n elif version == 117:\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriter117 as statawriter,\n )\n else: # versions 118 and 119\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriterUTF8 as statawriter,\n )\n\n kwargs: Dict[str, Any] = {}\n if version is None or version >= 117:\n # strl conversion is only supported >= 117\n kwargs[\"convert_strl\"] = convert_strl\n if version is None or version >= 118:\n # Specifying the version is only supported for UTF8 (118 or 119)\n kwargs[\"version\"] = version\n\n # mypy: Too many arguments for \"StataWriter\"\n writer = statawriter( # type: ignore[call-arg]\n path,\n self,\n convert_dates=convert_dates,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n write_index=write_index,\n variable_labels=variable_labels,\n compression=compression,\n storage_options=storage_options,\n **kwargs,\n )\n writer.write_file()\n\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:\n \"\"\"\n Write a DataFrame to the binary Feather format.\n\n Parameters\n ----------\n path : str or file-like object\n If a string, it will be used as Root Directory path.\n **kwargs :\n Additional keywords passed to :func:`pyarrow.feather.write_feather`.\n Starting with 
pyarrow 0.17, this includes the `compression`,\n `compression_level`, `chunksize` and `version` keywords.\n\n .. versionadded:: 1.1.0\n \"\"\"\n from pandas.io.feather_format import to_feather\n\n to_feather(self, path, **kwargs)\n\n @doc(\n Series.to_markdown,\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=_shared_docs[\"storage_options\"],\n examples=\"\"\"Examples\n --------\n >>> df = pd.DataFrame(\n ... data={\"animal_1\": [\"elk\", \"pig\"], \"animal_2\": [\"dog\", \"quetzal\"]}\n ... )\n >>> print(df.to_markdown())\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(df.to_markdown(tablefmt=\"grid\"))\n +----+------------+------------+\n | | animal_1 | animal_2 |\n +====+============+============+\n | 0 | elk | dog |\n +----+------------+------------+\n | 1 | pig | quetzal |\n +----+------------+------------+\n \"\"\",\n )\n def to_markdown(\n self,\n buf: Optional[Union[IO[str], str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n if \"showindex\" in kwargs:\n warnings.warn(\n \"'showindex' is deprecated. Only 'index' will be used \"\n \"in a future version. Use 'index' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n\n kwargs.setdefault(\"headers\", \"keys\")\n kwargs.setdefault(\"tablefmt\", \"pipe\")\n kwargs.setdefault(\"showindex\", index)\n tabulate = import_optional_dependency(\"tabulate\")\n result = tabulate.tabulate(self, **kwargs)\n if buf is None:\n return result\n\n with get_handle(buf, mode, storage_options=storage_options) as handles:\n assert not isinstance(handles.handle, (str, mmap.mmap))\n handles.handle.writelines(result)\n return None\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_parquet(\n self,\n path: Optional[FilePathOrBuffer] = None,\n engine: str = \"auto\",\n compression: Optional[str] = \"snappy\",\n index: Optional[bool] = None,\n partition_cols: Optional[List[str]] = None,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[bytes]:\n \"\"\"\n Write a DataFrame to the binary parquet format.\n\n This function writes the dataframe as a `parquet file\n <https://parquet.apache.org/>`_. You can choose different parquet\n backends, and have the option of compression. See\n :ref:`the user guide <io.parquet>` for more details.\n\n Parameters\n ----------\n path : str or file-like object, default None\n If a string, it will be used as Root Directory path\n when writing a partitioned dataset. By file-like object,\n we refer to objects with a write() method, such as a file handle\n (e.g. via builtin open function) or io.BytesIO. The engine\n fastparquet does not accept file-like objects. If path is None,\n a bytes object is returned.\n\n .. versionchanged:: 1.2.0\n\n Previously this was \"fname\"\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'\n Name of the compression to use. 
Use ``None`` for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output.\n If ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n\n .. versionadded:: 0.24.0\n\n partition_cols : list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n\n .. versionadded:: 0.24.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Additional arguments passed to the parquet library. See\n :ref:`pandas io <io.parquet>` for more details.\n\n Returns\n -------\n bytes if no path argument is provided else None\n\n See Also\n --------\n read_parquet : Read a parquet file.\n DataFrame.to_csv : Write a csv file.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_hdf : Write to hdf.\n\n Notes\n -----\n This function requires either the `fastparquet\n <https://pypi.org/project/fastparquet>`_ or `pyarrow\n <https://arrow.apache.org/docs/python/>`_ library.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})\n >>> df.to_parquet('df.parquet.gzip',\n ... compression='gzip') # doctest: +SKIP\n >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP\n col1 col2\n 0 1 3\n 1 2 4\n\n If you want to get a buffer to the parquet content you can use a io.BytesIO\n object, as long as you don't use partition_cols, which creates multiple files.\n\n >>> import io\n >>> f = io.BytesIO()\n >>> df.to_parquet(f)\n >>> f.seek(0)\n 0\n >>> content = f.read()\n \"\"\"\n from pandas.io.parquet import to_parquet\n\n return to_parquet(\n self,\n path,\n engine,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n @Substitution(\n header_type=\"bool\",\n header=\"Whether to print column labels, default True\",\n col_space_type=\"str or int, list or dict of int or str\",\n col_space=\"The minimum width of each column in CSS length \"\n \"units. An int is assumed to be px units.\\n\\n\"\n \" .. 
versionadded:: 0.25.0\\n\"\n \" Ability to use str\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_html(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[ColspaceArgType] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[FormattersType] = None,\n float_format: Optional[FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: Union[bool, str] = False,\n decimal: str = \".\",\n bold_rows: bool = True,\n classes: Optional[Union[str, List, Tuple]] = None,\n escape: bool = True,\n notebook: bool = False,\n border: Optional[int] = None,\n table_id: Optional[str] = None,\n render_links: bool = False,\n encoding: Optional[str] = None,\n ):\n \"\"\"\n Render a DataFrame as an HTML table.\n %(shared_params)s\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.display.html.border``.\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. versionadded:: 1.0\n\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links.\n\n .. versionadded:: 0.24.0\n %(returns)s\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:\n raise ValueError(\"Invalid value for justify parameter\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n )\n # TODO: a generic formatter wld b in DataFrameFormatter\n return fmt.DataFrameRenderer(formatter).to_html(\n buf=buf,\n classes=classes,\n notebook=notebook,\n border=border,\n encoding=encoding,\n table_id=table_id,\n render_links=render_links,\n )\n\n # ----------------------------------------------------------------------\n @Substitution(\n klass=\"DataFrame\",\n type_sub=\" and columns\",\n max_cols_sub=dedent(\n \"\"\"\\\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used.\"\"\"\n ),\n show_counts_sub=dedent(\n \"\"\"\\\n show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts.\n null_counts : bool, optional\n .. 
deprecated:: 1.2.0\n Use show_counts instead.\"\"\"\n ),\n examples_sub=dedent(\n \"\"\"\\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({\"int_col\": int_values, \"text_col\": text_values,\n ... \"float_col\": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\"df_info.txt\", \"w\",\n ... encoding=\"utf-8\") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)\n ... })\n >>> df.info()\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage='deep')\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB\"\"\"\n ),\n see_also_sub=dedent(\n \"\"\"\\\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns.\"\"\"\n ),\n version_added_sub=\"\",\n )\n @doc(BaseInfo.render)\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n memory_usage: Optional[Union[bool, str]] = None,\n show_counts: Optional[bool] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n if null_counts is not None:\n if show_counts is not None:\n raise ValueError(\"null_counts used with show_counts. Use show_counts.\")\n warnings.warn(\n \"null_counts is deprecated. 
Use show_counts instead\",\n FutureWarning,\n stacklevel=2,\n )\n show_counts = null_counts\n info = DataFrameInfo(\n data=self,\n memory_usage=memory_usage,\n )\n info.render(\n buf=buf,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n\n def memory_usage(self, index=True, deep=False) -> Series:\n \"\"\"\n Return the memory usage of each column in bytes.\n\n The memory usage can optionally include the contribution of\n the index and elements of `object` dtype.\n\n This value is displayed in `DataFrame.info` by default. This can be\n suppressed by setting ``pandas.options.display.memory_usage`` to False.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the DataFrame's\n index in returned Series. If ``index=True``, the memory usage of\n the index is the first item in the output.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the original column names and whose values\n is the memory usage of each column in bytes.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of an\n ndarray.\n Series.memory_usage : Bytes consumed by a Series.\n Categorical : Memory-efficient array for string values with\n many repeated values.\n DataFrame.info : Concise summary of a DataFrame.\n\n Examples\n --------\n >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']\n >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))\n ... for t in dtypes])\n >>> df = pd.DataFrame(data)\n >>> df.head()\n int64 float64 complex128 object bool\n 0 1 1.0 1.0+0.0j 1 True\n 1 1 1.0 1.0+0.0j 1 True\n 2 1 1.0 1.0+0.0j 1 True\n 3 1 1.0 1.0+0.0j 1 True\n 4 1 1.0 1.0+0.0j 1 True\n\n >>> df.memory_usage()\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n >>> df.memory_usage(index=False)\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n The memory footprint of `object` dtype columns is ignored by default:\n\n >>> df.memory_usage(deep=True)\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 180000\n bool 5000\n dtype: int64\n\n Use a Categorical for efficient storage of an object-dtype column with\n many repeated values.\n\n >>> df['object'].astype('category').memory_usage(deep=True)\n 5244\n \"\"\"\n result = self._constructor_sliced(\n [c.memory_usage(index=False, deep=deep) for col, c in self.items()],\n index=self.columns,\n )\n if index:\n result = self._constructor_sliced(\n self.index.memory_usage(deep=deep), index=[\"Index\"]\n ).append(result)\n return result\n\n def transpose(self, *args, copy: bool = False) -> DataFrame:\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. 
The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n Parameters\n ----------\n *args : tuple, optional\n Accepted for compatibility with NumPy.\n copy : bool, default False\n Whether to copy the data after transposing, even for DataFrames\n with a single dtype.\n\n Note that a copy is always required for mixed dtype DataFrames,\n or for DataFrames with any extension types.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n See Also\n --------\n numpy.transpose : Permute the dimensions of a given array.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the `object` dtype. In such a case, a copy of the data\n is always made.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = pd.DataFrame(data=d1)\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T # or df1.transpose()\n >>> df1_transposed\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'name': ['Alice', 'Bob'],\n ... 'score': [9.5, 8],\n ... 'employed': [False, True],\n ... 'kids': [0, 0]}\n >>> df2 = pd.DataFrame(data=d2)\n >>> df2\n name score employed kids\n 0 Alice 9.5 False 0\n 1 Bob 8.0 True 0\n\n >>> df2_transposed = df2.T # or df2.transpose()\n >>> df2_transposed\n 0 1\n name Alice Bob\n score 9.5 8.0\n employed False True\n kids 0 0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the `object` dtype:\n\n >>> df2.dtypes\n name object\n score float64\n employed bool\n kids int64\n dtype: object\n >>> df2_transposed.dtypes\n 0 object\n 1 object\n dtype: object\n \"\"\"\n nv.validate_transpose(args, {})\n # construct the args\n\n dtypes = list(self.dtypes)\n if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):\n # We have EAs with the same dtype. 
We can preserve that dtype in transpose.\n dtype = dtypes[0]\n arr_type = dtype.construct_array_type()\n values = self.values\n\n new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]\n result = self._constructor(\n dict(zip(self.index, new_values)), index=self.columns\n )\n\n else:\n new_values = self.values.T\n if copy:\n new_values = new_values.copy()\n result = self._constructor(\n new_values, index=self.columns, columns=self.index\n )\n\n return result.__finalize__(self, method=\"transpose\")\n\n @property\n def T(self) -> DataFrame:\n return self.transpose()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Parameters\n ----------\n i : int\n axis : int\n\n Notes\n -----\n If slice passed, the resulting data will be a view.\n \"\"\"\n # irow\n if axis == 0:\n new_values = self._mgr.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values, np.ndarray) and new_values.base is None\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[i],\n dtype=new_values.dtype,\n )\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n label = self.columns[i]\n\n values = self._mgr.iget(i)\n result = self._box_col_values(values, i)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def _get_column_array(self, i: int) -> ArrayLike:\n \"\"\"\n Get the values of the i'th column (ndarray or ExtensionArray, as stored\n in the Block)\n \"\"\"\n return self._mgr.iget_values(i)\n\n def _iter_column_arrays(self) -> Iterator[ArrayLike]:\n \"\"\"\n Iterate over the arrays of all columns in order.\n This returns the values as stored in the Block (ndarray or ExtensionArray).\n \"\"\"\n for i in range(len(self.columns)):\n yield self._get_column_array(i)\n\n def __getitem__(self, key):\n key = lib.item_from_zerodim(key)\n key = com.apply_if_callable(key, self)\n\n if is_hashable(key):\n # shortcut if the key is in columns\n if self.columns.is_unique and key in self.columns:\n if isinstance(self.columns, MultiIndex):\n return self._getitem_multilevel(key)\n return self._get_item_cache(key)\n\n # Do we have a slicer (on rows)?\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n if isinstance(indexer, np.ndarray):\n indexer = lib.maybe_indices_to_slice(\n indexer.astype(np.intp, copy=False), len(self)\n )\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._slice(indexer, axis=0)\n\n # Do we have a (boolean) DataFrame?\n if isinstance(key, DataFrame):\n return self.where(key)\n\n # Do we have a (boolean) 1d indexer?\n if com.is_bool_indexer(key):\n return self._getitem_bool_array(key)\n\n # We are left with two options: a single key, and a collection of keys,\n # We interpret tuples as collections only for non-MultiIndex\n is_single_key = isinstance(key, tuple) or not is_list_like(key)\n\n if is_single_key:\n if self.columns.nlevels > 1:\n return self._getitem_multilevel(key)\n indexer = self.columns.get_loc(key)\n if is_integer(indexer):\n indexer = [indexer]\n else:\n if is_iterator(key):\n key = list(key)\n indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]\n\n # take() does not accept boolean indexers\n if getattr(indexer, \"dtype\", None) == bool:\n indexer = np.where(indexer)[0]\n\n data = self._take_with_is_copy(indexer, axis=1)\n\n if 
is_single_key:\n # What does looking for a single key in a non-unique index return?\n # The behavior is inconsistent. It returns a Series, except when\n # - the key itself is repeated (test on data.shape, #9519), or\n # - we have a MultiIndex on columns (test on self.columns, #21309)\n if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):\n # GH#26490 using data[key] can cause RecursionError\n data = data._get_item_cache(key)\n\n return data\n\n def _getitem_bool_array(self, key):\n # also raises Exception if object array with NA values\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\n \"Boolean Series key will be reindexed to match DataFrame index.\",\n UserWarning,\n stacklevel=3,\n )\n elif len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}.\"\n )\n\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self._take_with_is_copy(indexer, axis=0)\n\n def _getitem_multilevel(self, key):\n # self.columns is a MultiIndex\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = self._constructor(\n new_values, index=self.index, columns=result_columns\n )\n result = result.__finalize__(self)\n\n # If there is only one column being returned, and its name is\n # either an empty string, or a tuple with an empty string as its\n # first element, then treat the empty string as a placeholder\n # and return the column as if the user had provided that empty\n # string in the key. 
If the result is a Series, exclude the\n # implied empty string from its name.\n if len(result.columns) == 1:\n top = result.columns[0]\n if isinstance(top, tuple):\n top = top[0]\n if top == \"\":\n result = result[\"\"]\n if isinstance(result, Series):\n result = self._constructor_sliced(\n result, index=self.index, name=key\n )\n\n result._set_is_copy(self)\n return result\n else:\n # loc is neither a slice nor ndarray, so must be an int\n return self._ixs(loc, axis=1)\n\n def _get_value(self, index, col, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n scalar\n \"\"\"\n if takeable:\n series = self._ixs(col, axis=1)\n return series._values[index]\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n\n try:\n loc = engine.get_loc(index)\n return series._values[loc]\n except KeyError:\n # GH 20629\n if self.index.nlevels > 1:\n # partial indexing forbidden\n raise\n\n # we cannot handle direct indexing\n # use positional\n col = self.columns.get_loc(col)\n index = self.index.get_loc(index)\n return self._get_value(index, col, takeable=True)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, DataFrame) or getattr(key, \"ndim\", None) == 2:\n self._setitem_frame(key, value)\n elif isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(value, DataFrame):\n self._set_item_frame_value(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key: slice, value):\n # NB: we can't just use self.loc[key] = value because that\n # operates on labels and we need to operate positional for\n # backwards-compat, xref GH#31469\n self._check_setitem_copy()\n self.iloc[key] = value\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}!\"\n )\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.iloc[indexer] = value\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError(\"Columns must be same length as key\")\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n self.loc._ensure_listlike_indexer(key, axis=1, value=value)\n indexer = self.loc._get_listlike_indexer(\n key, axis=1, raise_missing=False\n )[1]\n self._check_setitem_copy()\n self.iloc[:, indexer] = value\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if isinstance(key, np.ndarray):\n if key.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n key = self._constructor(key, **self._construct_axes_dict())\n\n if key.size and not is_bool_dtype(key.values):\n raise TypeError(\n \"Must pass DataFrame or 2-d ndarray with boolean values only\"\n )\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n 
self._where(-key, value, inplace=True)\n\n def _set_item_frame_value(self, key, value: DataFrame) -> None:\n self._ensure_valid_index(value)\n\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex(cols, axis=1)\n\n # now align rows\n value = _reindex_for_setitem(value, self.index)\n value = value.T\n self._set_item_mgr(key, value)\n\n def _iset_item_mgr(self, loc: int, value) -> None:\n self._mgr.iset(loc, value)\n self._clear_item_cache()\n\n def _set_item_mgr(self, key, value):\n value = _maybe_atleast_2d(value)\n\n try:\n loc = self._info_axis.get_loc(key)\n except KeyError:\n # This item wasn't present, just insert at end\n self._mgr.insert(len(self._info_axis), key, value)\n else:\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _iset_item(self, loc: int, value):\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n value = self._sanitize_column(value)\n\n if (\n key in self.columns\n and value.ndim == 1\n and not is_extension_array_dtype(value)\n ):\n # broadcast across multiple columns if necessary\n if not self.columns.is_unique or isinstance(self.columns, MultiIndex):\n existing_piece = self[key]\n if isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n self._set_item_mgr(key, value)\n\n def _set_value(self, index, col, value, takeable: bool = False):\n \"\"\"\n Put single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar\n takeable : interpret the index/col as indexers, default False\n \"\"\"\n try:\n if takeable is True:\n series = self._ixs(col, axis=1)\n series._set_value(index, value, takeable=True)\n return\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n loc = engine.get_loc(index)\n validate_numeric_casting(series.dtype, value)\n\n series._values[loc] = value\n # Note: trying to use series._set_value breaks tests in\n # tests.frame.indexing.test_indexing and tests.indexing.test_partial\n except (KeyError, TypeError):\n # set using a non-recursive method & reset the cache\n if takeable:\n self.iloc[index, col] = value\n else:\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n Ensure that if we don't have an index, that we can create one from the\n passed value.\n \"\"\"\n # GH5632, make sure that we are a Series convertible\n if not len(self.index) and is_list_like(value) and len(value):\n if not isinstance(value, DataFrame):\n try:\n value = Series(value)\n 
except (ValueError, NotImplementedError, TypeError) as err:\n raise ValueError(\n \"Cannot set a frame with no defined index \"\n \"and a value that cannot be converted to a Series\"\n ) from err\n\n # GH31368 preserve name of index\n index_copy = value.index.copy()\n if self.index.name is not None:\n index_copy.name = self.index.name\n\n self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)\n\n def _box_col_values(self, values, loc: int) -> Series:\n \"\"\"\n Provide boxed values for a column.\n \"\"\"\n # Lookup in columns so that if e.g. a str datetime was passed\n # we attach the Timestamp object as the name.\n name = self.columns[loc]\n klass = self._constructor_sliced\n return klass(values, index=self.index, name=name, fastpath=True)\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def query(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n\n You can refer to column names that are not valid Python variable names\n by surrounding them in backticks. Thus, column names containing spaces\n or punctuations (besides underscores) or starting with digits must be\n surrounded by backticks. (For example, a column named \"Area (cm^2) would\n be referenced as `Area (cm^2)`). Column names which are Python keywords\n (like \"list\", \"for\", \"import\", etc) cannot be used.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n .. versionadded:: 0.25.0\n Backtick quoting introduced.\n\n .. versionadded:: 1.0.0\n Expanding functionality of backtick quoting for more than only spaces.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n DataFrame or None\n DataFrame resulting from the provided query expression or\n None if ``inplace=True``.\n\n See Also\n --------\n eval : Evaluate a string describing operations on\n DataFrame columns.\n DataFrame.eval : Evaluate a string describing operations on\n DataFrame columns.\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. 
This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query. Please note that\n Python keywords may not be used as identifiers.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n *Backtick quoted variables*\n\n Backtick quoted variables are parsed as literal Python code and\n are converted internally to a Python valid identifier.\n This can lead to the following problems.\n\n During parsing a number of disallowed characters inside the backtick\n quoted string are replaced by strings that are allowed as a Python identifier.\n These characters include all operators in Python, the space character, the\n question mark, the exclamation mark, the dollar sign, and the euro sign.\n For other characters that fall outside the ASCII range (U+0001..U+007F)\n and those that are not further specified in PEP 3131,\n the query parser will raise an error.\n This excludes whitespace different than the space character,\n but also the hashtag (as it is used for comments) and the backtick\n itself (backtick can also not be escaped).\n\n In a special case, quotes that make a pair around a backtick can\n confuse the parser.\n For example, ```it's` > `that's``` will raise an error,\n as it forms a quoted string (``'s > `that'``) with a backtick inside.\n\n See also the Python documentation about lexical analysis\n (https://docs.python.org/3/reference/lexical_analysis.html)\n in combination with the source code in :mod:`pandas.core.computation.parsing`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not isinstance(expr, str):\n msg = f\"expr must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n kwargs[\"target\"] = None\n res = self.eval(expr, **kwargs)\n\n try:\n result = self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n result = self[res]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def eval(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. 
This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ndarray, scalar, pandas object, or None\n The result of the evaluation or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Notes\n -----\n For more details see the API documentation for :func:`~eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n\n Multiple columns can be assigned to using multi-line expressions:\n\n >>> df.eval(\n ... '''\n ... C = A + B\n ... D = A - B\n ... '''\n ... )\n A B C D\n 0 1 10 11 -9\n 1 2 8 10 -6\n 2 3 6 9 -3\n 3 4 4 8 0\n 4 5 2 7 3\n \"\"\"\n from pandas.core.computation.eval import eval as _eval\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n resolvers = kwargs.pop(\"resolvers\", None)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n column_resolvers = self._get_cleaned_column_resolvers()\n resolvers = column_resolvers, index_resolvers\n if \"target\" not in kwargs:\n kwargs[\"target\"] = self\n kwargs[\"resolvers\"] = kwargs.get(\"resolvers\", ()) + tuple(resolvers)\n\n return _eval(expr, inplace=inplace, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None) -> DataFrame:\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. 
At least\n one of these parameters must be supplied.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n\n See Also\n --------\n DataFrame.dtypes: Return Series with the data type of each column.\n\n Notes\n -----\n * To select all *numeric* types, use ``np.number`` or ``'number'``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or\n ``'timedelta64'``\n * To select Pandas categorical dtypes, use ``'category'``\n * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in\n 0.20.0) or ``'datetime64[ns, tz]'``\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int64'])\n b c\n 0 True 1.0\n 1 False 2.0\n 2 True 1.0\n 3 False 2.0\n 4 True 1.0\n 5 False 2.0\n \"\"\"\n if not is_list_like(include):\n include = (include,) if include is not None else ()\n if not is_list_like(exclude):\n exclude = (exclude,) if exclude is not None else ()\n\n selection = (frozenset(include), frozenset(exclude))\n\n if not any(selection):\n raise ValueError(\"at least one of include or exclude must be nonempty\")\n\n # convert the myriad valid dtypes object to a single representation\n include = frozenset(infer_dtype_from_object(x) for x in include)\n exclude = frozenset(infer_dtype_from_object(x) for x in exclude)\n for dtypes in (include, exclude):\n invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError(f\"include and exclude overlap on {(include & exclude)}\")\n\n # We raise when both include and exclude are empty\n # Hence, we can just shrink the columns we want to keep\n keep_these = np.full(self.shape[1], True)\n\n def extract_unique_dtypes_from_dtypes_set(\n dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray\n ) -> List[Dtype]:\n extracted_dtypes = [\n unique_dtype\n for unique_dtype in unique_dtypes\n if (\n issubclass(\n unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]\n )\n or (\n np.number in dtypes_set\n and getattr(unique_dtype, \"_is_numeric\", False)\n )\n )\n ]\n return extracted_dtypes\n\n unique_dtypes = self.dtypes.unique()\n\n if include:\n included_dtypes = extract_unique_dtypes_from_dtypes_set(\n include, unique_dtypes\n )\n keep_these &= self.dtypes.isin(included_dtypes)\n\n if exclude:\n excluded_dtypes = extract_unique_dtypes_from_dtypes_set(\n exclude, unique_dtypes\n )\n keep_these &= ~self.dtypes.isin(excluded_dtypes)\n\n return self.iloc[:, keep_these.values]\n\n def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:\n \"\"\"\n Insert column into DataFrame at 
specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n See Also\n --------\n Index.insert : Insert new item by index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n >>> df.insert(1, \"newcol\", [99, 99])\n >>> df\n col1 newcol col2\n 0 1 99 3\n 1 2 99 4\n >>> df.insert(0, \"col1\", [100, 100], allow_duplicates=True)\n >>> df\n col1 col1 newcol col2\n 0 100 1 99 3\n 1 100 2 99 4\n \"\"\"\n if allow_duplicates and not self.flags.allows_duplicate_labels:\n raise ValueError(\n \"Cannot specify 'allow_duplicates=True' when \"\n \"'self.flags.allows_duplicate_labels' is False.\"\n )\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs) -> DataFrame:\n r\"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n \"\"\"\n data = self.copy()\n\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data\n\n def _sanitize_column(self, value):\n \"\"\"\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n value : scalar, Series, or array-like\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n self._ensure_valid_index(value)\n\n # We should never get here with DataFrame value\n if isinstance(value, Series):\n value = _reindex_for_setitem(value, self.index)\n\n elif isinstance(value, ExtensionArray):\n # Explicitly copy here, instead of in sanitize_index,\n # as sanitize_index won't copy an EA, even with copy=True\n value = value.copy()\n value = sanitize_index(value, self.index)\n\n elif isinstance(value, Index) or is_sequence(value):\n\n # turn me into an ndarray\n value = sanitize_index(value, self.index)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = maybe_convert_platform(value)\n else:\n value = com.asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n elif isinstance(value, Index):\n value = value.copy(deep=True)\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = maybe_infer_to_datetimelike(value)\n\n else:\n value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)\n\n return value\n\n @property\n def _series(self):\n return {\n item: Series(\n self._mgr.iget(idx), index=self.index, name=item, fastpath=True\n )\n for idx, item in enumerate(self.columns)\n }\n\n def lookup(self, row_labels, col_labels) -> np.ndarray:\n \"\"\"\n Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n .. 
deprecated:: 1.2.0\n DataFrame.lookup is deprecated,\n use DataFrame.melt and DataFrame.loc instead.\n For an example see :meth:`~pandas.DataFrame.lookup`\n in the user guide.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup.\n col_labels : sequence\n The column labels to use for lookup.\n\n Returns\n -------\n numpy.ndarray\n The found values.\n \"\"\"\n msg = (\n \"The 'lookup' method is deprecated and will be\"\n \"removed in a future version.\"\n \"You can use DataFrame.melt and DataFrame.loc\"\n \"as a substitute.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError(\"Row labels must have same size as column labels\")\n if not (self.index.is_unique and self.columns.is_unique):\n # GH#33041\n raise ValueError(\"DataFrame.lookup requires unique index and columns\")\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError(\"One or more row labels was not found\")\n if (cidx == -1).any():\n raise KeyError(\"One or more column labels was not found\")\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype=\"O\")\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self._get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n # ----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):\n frame = self\n\n columns = axes[\"columns\"]\n if columns is not None:\n frame = frame._reindex_columns(\n columns, method, copy, level, fill_value, limit, tolerance\n )\n\n index = axes[\"index\"]\n if index is not None:\n frame = frame._reindex_index(\n index, method, copy, level, fill_value, limit, tolerance\n )\n\n return frame\n\n def _reindex_index(\n self,\n new_index,\n method,\n copy: bool,\n level: Level,\n fill_value=np.nan,\n limit=None,\n tolerance=None,\n ):\n new_index, indexer = self.index.reindex(\n new_index, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {0: [new_index, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_columns(\n self,\n new_columns,\n method,\n copy: bool,\n level: Level,\n fill_value=None,\n limit=None,\n tolerance=None,\n ):\n new_columns, indexer = self.columns.reindex(\n new_columns, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {1: [new_columns, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:\n \"\"\"\n We are guaranteed non-Nones in the axes.\n \"\"\"\n new_index, row_indexer = self.index.reindex(axes[\"index\"])\n new_columns, col_indexer = self.columns.reindex(axes[\"columns\"])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = algorithms.take_2d_multi(\n self.values, indexer, fill_value=fill_value\n )\n return self._constructor(new_values, index=new_index, columns=new_columns)\n else:\n return self._reindex_with_indexers(\n {0: [new_index, row_indexer], 1: [new_columns, col_indexer]},\n copy=copy,\n 
fill_value=fill_value,\n )\n\n @doc(NDFrame.align, **_shared_doc_kwargs)\n def align(\n self,\n other,\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n level: Optional[Level] = None,\n copy: bool = True,\n fill_value=None,\n method: Optional[str] = None,\n limit=None,\n fill_axis: Axis = 0,\n broadcast_axis: Optional[Axis] = None,\n ) -> DataFrame:\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\" column or\",\n axis_description_sub=\", and 1 identifies the columns\",\n see_also_sub=\" or columns\",\n )\n @Appender(NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.reindex.__doc__)\n @rewrite_axis_style_signature(\n \"labels\",\n [\n (\"method\", None),\n (\"copy\", True),\n (\"level\", None),\n (\"fill_value\", np.nan),\n (\"limit\", None),\n (\"tolerance\", None),\n ],\n )\n def reindex(self, *args, **kwargs) -> DataFrame:\n axes = validate_axis_style_args(self, args, kwargs, \"labels\", \"reindex\")\n kwargs.update(axes)\n # Pop these, since the values are in `kwargs` under different names\n kwargs.pop(\"axis\", None)\n kwargs.pop(\"labels\", None)\n return super().reindex(**kwargs)\n\n def drop(\n self,\n labels=None,\n axis: Axis = 0,\n index=None,\n columns=None,\n level: Optional[Level] = None,\n inplace: bool = False,\n errors: str = \"raise\",\n ):\n \"\"\"\n Drop specified labels from rows or columns.\n\n Remove rows or columns by specifying label names and corresponding\n axis, or by specifying directly index or column names. When using a\n multi-index, labels on different levels can be removed by specifying\n the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index or column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Whether to drop labels from the index (0 or 'index') or\n columns (1 or 'columns').\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is equivalent to ``index=labels``).\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If False, return a copy. 
Otherwise, do operation\n inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n DataFrame or None\n DataFrame without the removed index or column labels or\n None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis.\n\n See Also\n --------\n DataFrame.loc : Label-location based indexer for selection by label.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n DataFrame.drop_duplicates : Return DataFrame with duplicate rows\n removed, optionally only considering certain columns.\n Series.drop : Return Series with specified index labels removed.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n Drop columns and/or rows of MultiIndex DataFrame\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],\n ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],\n ... [250, 150], [1.5, 0.8], [320, 250],\n ... [1, 0.8], [0.3, 0.2]])\n >>> df\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n length 1.5 1.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n length 1.5 0.8\n falcon speed 320.0 250.0\n weight 1.0 0.8\n length 0.3 0.2\n\n >>> df.drop(index='cow', columns='small')\n big\n lama speed 45.0\n weight 200.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n\n >>> df.drop(index='length', level=1)\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n falcon speed 320.0 250.0\n weight 1.0 0.8\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @rewrite_axis_style_signature(\n \"mapper\",\n [(\"copy\", True), (\"inplace\", False), (\"level\", None), (\"errors\", \"ignore\")],\n )\n def rename(\n self,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Alter axes labels.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or function transformations to apply to\n that axis' values. Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index`` and\n ``columns``.\n index : dict-like or function\n Alternative to specifying axis (``mapper, axis=0``\n is equivalent to ``index=mapper``).\n columns : dict-like or function\n Alternative to specifying axis (``mapper, axis=1``\n is equivalent to ``columns=mapper``).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to target with ``mapper``. 
Can be either the axis name\n ('index', 'columns') or number (0, 1). The default is 'index'.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new DataFrame. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the renamed axis labels or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n DataFrame.rename_axis : Set the name of the axis.\n\n Examples\n --------\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Rename columns using a mapping:\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n Rename index using a mapping:\n\n >>> df.rename(index={0: \"x\", 1: \"y\", 2: \"z\"})\n A B\n x 1 4\n y 2 5\n z 3 6\n\n Cast index labels to a different type:\n\n >>> df.index\n RangeIndex(start=0, stop=3, step=1)\n >>> df.rename(index=str).index\n Index(['0', '1', '2'], dtype='object')\n\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"b\", \"C\": \"c\"}, errors=\"raise\")\n Traceback (most recent call last):\n KeyError: ['C'] not found in axis\n\n Using axis-style parameters:\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n \"\"\"\n return super().rename(\n mapper=mapper,\n index=index,\n columns=columns,\n axis=axis,\n copy=copy,\n inplace=inplace,\n level=level,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit=None,\n downcast=None,\n ) -> Optional[DataFrame]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Hashable) -> Series:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... 
columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n return super().pop(item=item)\n\n @doc(NDFrame.replace, **_shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace: bool = False,\n limit=None,\n regex: bool = False,\n method: str = \"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_columnwise(\n self, mapping: Dict[Hashable, Tuple[Any, Any]], inplace: bool, regex\n ):\n \"\"\"\n Dispatch to Series.replace column-wise.\n\n\n Parameters\n ----------\n mapping : dict\n of the form {col: (target, value)}\n inplace : bool\n regex : bool or same types as `to_replace` in DataFrame.replace\n\n Returns\n -------\n DataFrame or None\n \"\"\"\n # Operate column-wise\n res = self if inplace else self.copy()\n ax = self.columns\n\n for i in range(len(ax)):\n if ax[i] in mapping:\n ser = self.iloc[:, i]\n\n target, value = mapping[ax[i]]\n newobj = ser.replace(target, value, regex=regex)\n\n res.iloc[:, i] = newobj\n\n if inplace:\n return\n return res.__finalize__(self)\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(\n self, periods=1, freq=None, axis: Axis = 0, fill_value=lib.no_default\n ) -> DataFrame:\n axis = self._get_axis_number(axis)\n\n ncols = len(self.columns)\n if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:\n # We will infer fill_value to match the closest column\n\n # Use a column that we know is valid for our column's dtype GH#38434\n label = self.columns[0]\n\n if periods > 0:\n result = self.iloc[:, :-periods]\n for col in range(min(ncols, abs(periods))):\n # TODO(EA2D): doing this in a loop unnecessary with 2D EAs\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, 0].shift(len(self))\n result.insert(0, label, filler, allow_duplicates=True)\n else:\n result = self.iloc[:, -periods:]\n for col in range(min(ncols, abs(periods))):\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, -1].shift(len(self))\n result.insert(\n len(result.columns), label, filler, allow_duplicates=True\n )\n\n result.columns = self.columns.copy()\n return result\n\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def set_index(\n self,\n keys,\n drop: bool = True,\n append: bool = False,\n inplace: bool = False,\n verify_integrity: bool = False,\n ):\n \"\"\"\n Set the DataFrame index using existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. 
Here, \"array\"\n encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and\n instances of :class:`~collections.abc.Iterator`.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n If True, modifies the DataFrame in place (do not create a new object).\n verify_integrity : bool, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. Setting to False will improve the performance of this\n method.\n\n Returns\n -------\n DataFrame or None\n Changed row labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]})\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month')\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month'])\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n\n Create a MultiIndex using an Index and a column:\n\n >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\n month sale\n year\n 1 2012 1 55\n 2 2014 4 40\n 3 2013 7 84\n 4 2014 10 31\n\n Create a MultiIndex using two Series:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> df.set_index([s, s**2])\n month year sale\n 1 1 1 2012 55\n 2 4 4 2014 40\n 3 9 7 2013 84\n 4 16 10 2014 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if not isinstance(keys, list):\n keys = [keys]\n\n err_msg = (\n 'The parameter \"keys\" may be a column key, one-dimensional '\n \"array, or a list containing only valid column keys and \"\n \"one-dimensional arrays.\"\n )\n\n missing: List[Hashable] = []\n for col in keys:\n if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):\n # arrays are fine as long as they are one-dimensional\n # iterators get converted to list below\n if getattr(col, \"ndim\", 1) != 1:\n raise ValueError(err_msg)\n else:\n # everything else gets tried as a key; see GH 24969\n try:\n found = col in self.columns\n except TypeError as err:\n raise TypeError(\n f\"{err_msg}. 
Received column of type {type(col)}\"\n ) from err\n else:\n if not found:\n missing.append(col)\n\n if missing:\n raise KeyError(f\"None of {missing} are in the columns\")\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names: List[Hashable] = []\n if append:\n names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index._get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove: List[Hashable] = []\n for col in keys:\n if isinstance(col, MultiIndex):\n for n in range(col.nlevels):\n arrays.append(col._get_level_values(n))\n names.extend(col.names)\n elif isinstance(col, (Index, Series)):\n # if Index then not MultiIndex (treated above)\n arrays.append(col)\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray)):\n arrays.append(col)\n names.append(None)\n elif isinstance(col, abc.Iterator):\n arrays.append(list(col))\n names.append(None)\n # from here, col can only be a column label\n else:\n arrays.append(frame[col]._values)\n names.append(col)\n if drop:\n to_remove.append(col)\n\n if len(arrays[-1]) != len(self):\n # check newest element against length of calling frame, since\n # ensure_index_from_sequences would not raise for append=False.\n raise ValueError(\n f\"Length mismatch: Expected {len(self)} rows, \"\n f\"received array of length {len(arrays[-1])}\"\n )\n\n index = ensure_index_from_sequences(arrays, names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index[index.duplicated()].unique()\n raise ValueError(f\"Index has duplicate keys: {duplicates}\")\n\n # use set to handle duplicate column names gracefully in case of drop\n for c in set(to_remove):\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n @overload\n # https://github.com/python/mypy/issues/6580\n # Overloaded function signatures 1 and 2 overlap with incompatible return types\n def reset_index( # type: ignore[misc]\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[False] = ...,\n col_level: Hashable = ...,\n col_fill: Hashable = ...,\n ) -> DataFrame:\n ...\n\n @overload\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[True] = ...,\n col_level: Hashable = ...,\n col_fill: Hashable = ...,\n ) -> None:\n ...\n\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: Hashable = 0,\n col_fill: Hashable = \"\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Reset the index, or a level of it.\n\n Reset the index of the DataFrame, and use the default one instead.\n If the DataFrame has a MultiIndex, this method can remove one or more\n levels.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. 
By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the new index or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column, and a\n new sequential index is used:\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = pd.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class')\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1)\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='species')\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='genus')\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n new_index = ibase.default_index(len(new_obj))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if not drop:\n to_insert: Iterable[Tuple[Any, Optional[Any]]]\n if isinstance(self.index, MultiIndex):\n names = [\n (n if n is not None else f\"level_{i}\")\n for i, n in enumerate(self.index.names)\n ]\n to_insert = zip(self.index.levels, self.index.codes)\n else:\n default = \"index\" if \"index\" not in self else \"level_0\"\n names = [default] if self.index.name is None else [self.index.name]\n to_insert = ((self.index, None),)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(to_insert))):\n if not (level is None or i in level):\n continue\n name = names[i]\n if multi_col:\n col_name = list(name) if isinstance(name, tuple) else [name]\n if col_fill is None:\n if len(col_name) not in (1, self.columns.nlevels):\n raise ValueError(\n \"col_fill=None is incompatible \"\n f\"with incomplete column name {name}\"\n )\n col_fill = col_name[0]\n\n lev_num = self.columns._get_level_number(col_level)\n name_lst = [col_fill] * lev_num + col_name\n missing = self.columns.nlevels - len(name_lst)\n name_lst += [col_fill] * missing\n name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = lev._values\n if level_values.dtype == np.object_:\n level_values = lib.maybe_convert_objects(level_values)\n\n if lab is not None:\n # if we have the codes, extract the values with a mask\n level_values = algorithms.take(\n level_values, lab, allow_fill=True, fill_value=lev._na_value\n )\n\n new_obj.insert(0, name, level_values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n return None\n\n # ----------------------------------------------------------------------\n # Reindex-based selection methods\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) -> DataFrame:\n result = self._constructor(self._mgr.isna(func=isna))\n return result.__finalize__(self, method=\"isna\")\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> DataFrame:\n return self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> DataFrame:\n return ~self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> DataFrame:\n return ~self.isna()\n\n def 
dropna(\n self,\n axis: Axis = 0,\n how: str = \"any\",\n thresh=None,\n subset=None,\n inplace: bool = False,\n ):\n \"\"\"\n Remove missing values.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n * 1, or 'columns' : Drop columns which contain missing value.\n\n .. versionchanged:: 1.0.0\n\n Pass tuple or list to drop on multiple axes.\n Only a single axis is allowed.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.isna: Indicate missing values.\n DataFrame.notna : Indicate existing (non-missing) values.\n DataFrame.fillna : Replace missing values.\n Series.dropna : Drop missing values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.nan, 'Batmobile', 'Bullwhip'],\n ... \"born\": [pd.NaT, pd.Timestamp(\"1940-04-25\"),\n ... 
pd.NaT]})\n >>> df\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(axis, (tuple, list)):\n # GH20987\n raise TypeError(\"supplying multiple axes to axis is no longer supported.\")\n\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n agg_obj = self.take(indices, axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == \"all\":\n mask = count > 0\n else:\n if how is not None:\n raise ValueError(f\"invalid how option: {how}\")\n else:\n raise TypeError(\"must specify how or thresh\")\n\n result = self.loc(axis=axis)[mask]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def drop_duplicates(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n inplace: bool = False,\n ignore_index: bool = False,\n ) -> Optional[DataFrame]:\n \"\"\"\n Return DataFrame with duplicate rows removed.\n\n Considering certain columns is optional. Indexes, including time indexes\n are ignored.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : bool, default False\n Whether to drop duplicates in place or to return a copy.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n DataFrame or None\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.value_counts: Count unique combinations of columns.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, it removes duplicate rows based on all columns.\n\n >>> df.drop_duplicates()\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n To remove duplicates on specific column(s), use ``subset``.\n\n >>> df.drop_duplicates(subset=['brand'])\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n\n To remove duplicates and keep last occurrences, use ``keep``.\n\n >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')\n brand style rating\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 4 Indomie pack 5.0\n \"\"\"\n if self.empty:\n return self.copy()\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ignore_index = validate_bool_kwarg(ignore_index, \"ignore_index\")\n duplicated = self.duplicated(subset, keep=keep)\n\n result = self[-duplicated]\n if ignore_index:\n result.index = ibase.default_index(len(result))\n\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n ) -> Series:\n \"\"\"\n Return boolean Series denoting duplicate rows.\n\n Considering certain columns is optional.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to mark.\n\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Boolean series for each duplicated rows.\n\n See Also\n --------\n Index.duplicated : Equivalent method on index.\n Series.duplicated : Equivalent method on Series.\n Series.drop_duplicates : Remove duplicate values from Series.\n DataFrame.drop_duplicates : Remove duplicate values from DataFrame.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, for each set of duplicated values, the first occurrence\n is set on False and all others on True.\n\n >>> df.duplicated()\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True.\n\n >>> df.duplicated(keep='last')\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By setting ``keep`` on False, all duplicates are True.\n\n >>> df.duplicated(keep=False)\n 0 True\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n To find duplicates on specific column(s), use ``subset``.\n\n >>> df.duplicated(subset=['brand'])\n 0 False\n 1 True\n 2 False\n 3 True\n 4 True\n dtype: bool\n \"\"\"\n from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64\n\n if self.empty:\n return self._constructor_sliced(dtype=bool)\n\n def f(vals):\n labels, shape = algorithms.factorize(\n vals, size_hint=min(len(self), SIZE_HINT_LIMIT)\n )\n return labels.astype(\"i8\", copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self.columns\n ):\n subset = (subset,)\n\n # needed for mypy since can't narrow types using np.iterable\n subset = cast(Iterable, subset)\n\n # Verify all columns in subset exist in the queried dataframe\n # Otherwise, raise a KeyError, same as if you try to __getitem__ with a\n # key that doesn't exist.\n diff = Index(subset).difference(self.columns)\n if not diff.empty:\n raise KeyError(diff)\n\n vals = (col.values for name, col in self.items() if name in subset)\n labels, shape = map(list, zip(*map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n # ----------------------------------------------------------------------\n # Sorting\n # TODO: Just move the sort_values doc here.\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.sort_values.__doc__)\n # error: Signature of \"sort_values\" incompatible with supertype \"NDFrame\"\n def sort_values( # type: ignore[override]\n self,\n by,\n axis: Axis = 0,\n ascending=True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = self._get_axis_number(axis)\n\n if not isinstance(by, list):\n by = [by]\n if is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) != length of by ({len(by)})\"\n )\n if len(by) > 1:\n\n keys = [self._get_label_or_level_values(x, axis=axis) for x in by]\n\n # need to rewrap columns in Series to apply key function\n if key is not None:\n keys = [Series(k, name=name) for (k, name) in zip(keys, by)]\n\n indexer = lexsort_indexer(\n keys, orders=ascending, na_position=na_position, key=key\n )\n indexer = ensure_platform_int(indexer)\n else:\n\n by = by[0]\n k = self._get_label_or_level_values(by, axis=axis)\n\n # need to rewrap column in Series to apply key function\n if key is not None:\n k = Series(k, name=by)\n\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = nargsort(\n k, 
kind=kind, ascending=ascending, na_position=na_position, key=key\n )\n\n new_data = self._mgr.take(\n indexer, axis=self._get_block_manager_axis(axis), verify=False\n )\n\n if ignore_index:\n new_data.set_axis(1, ibase.default_index(len(indexer)))\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Level] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort object by labels (along an axis).\n\n Returns a new DataFrame sorted by label if `inplace` argument is\n ``False``, otherwise updates the original DataFrame and returns None.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. `mergesort` and `stable` are the only stable algorithms. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape. For MultiIndex\n inputs, the key is applied *per level*.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame or None\n The original DataFrame sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort Series by the index.\n DataFrame.sort_values : Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],\n ... columns=['A'])\n >>> df.sort_index()\n A\n 1 4\n 29 2\n 100 1\n 150 5\n 234 3\n\n By default, it sorts in ascending order, to sort in descending order,\n use ``ascending=False``\n\n >>> df.sort_index(ascending=False)\n A\n 234 3\n 150 5\n 100 1\n 29 2\n 1 4\n\n A key function can be specified which is applied to the index before\n sorting. 
For a ``MultiIndex`` this is applied to each level separately.\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])\n >>> df.sort_index(key=lambda x: x.str.lower())\n a\n A 1\n b 2\n C 3\n d 4\n \"\"\"\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def value_counts(\n self,\n subset: Optional[Sequence[Hashable]] = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n ):\n \"\"\"\n Return a Series containing counts of unique rows in the DataFrame.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n\n Notes\n -----\n The returned Series will have a MultiIndex with one level per input\n column. By default, rows that contain any NA values are omitted from\n the result. By default, the resulting Series will be in descending\n order so that the first element is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],\n ... 'num_wings': [2, 0, 0, 0]},\n ... index=['falcon', 'dog', 'cat', 'ant'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 4 0\n ant 6 0\n\n >>> df.value_counts()\n num_legs num_wings\n 4 0 2\n 2 2 1\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(sort=False)\n num_legs num_wings\n 2 2 1\n 4 0 2\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(ascending=True)\n num_legs num_wings\n 2 2 1\n 6 0 1\n 4 0 2\n dtype: int64\n\n >>> df.value_counts(normalize=True)\n num_legs num_wings\n 4 0 0.50\n 2 2 0.25\n 6 0 0.25\n dtype: float64\n \"\"\"\n if subset is None:\n subset = self.columns.tolist()\n\n counts = self.groupby(subset).grouper.size()\n\n if sort:\n counts = counts.sort_values(ascending=ascending)\n if normalize:\n counts /= counts.sum()\n\n # Force MultiIndex for single column\n if len(subset) == 1:\n counts.index = MultiIndex.from_arrays(\n [counts.index], names=[counts.index.name]\n )\n\n return counts\n\n def nlargest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - `first` : prioritize the first occurrence(s)\n - `last` : prioritize the last occurrence(s)\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 11300,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 11300 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(3, 'population')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nlargest(3, 'population', keep='last')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nlargest(3, 'population', keep='all')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n\n To order by the largest values in column \"population\" and then \"GDP\",\n we can specify multiple columns like in the next example.\n\n >>> df.nlargest(3, ['population', 'GDP'])\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n \"\"\"\n return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()\n\n def nsmallest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 337000,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"population\".\n\n >>> df.nsmallest(3, 'population')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nsmallest(3, 'population', keep='last')\n population GDP alpha-2\n Anguilla 11300 311 AI\n Tuvalu 11300 38 TV\n Nauru 337000 182 NR\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nsmallest(3, 'population', keep='all')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n\n To order by the smallest values in column \"population\" and then \"GDP\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(3, ['population', 'GDP'])\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Nauru 337000 182 NR\n \"\"\"\n return algorithms.SelectNFrame(\n self, n=n, keep=keep, columns=columns\n ).nsmallest()\n\n def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n\n if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only swap levels on a hierarchical axis.\")\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.swaplevel(i, j)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Where to reorder levels.\n\n Returns\n -------\n DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.reorder_levels(order)\n return result\n\n # ----------------------------------------------------------------------\n # Arithmetic Methods\n\n def _cmp_method(self, other, op):\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)\n\n # See GH#4537 for discussion of scalar op behavior\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n def _arith_method(self, other, op):\n if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):\n return ops.frame_arith_method_with_reindex(self, other, op)\n\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)\n\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n _logical_method = _arith_method\n\n def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):\n \"\"\"\n Evaluate the frame operation func(left, right) by evaluating\n column-by-column, dispatching to the Series implementation.\n\n Parameters\n ----------\n right : scalar, Series, or DataFrame\n func : arithmetic or comparison operator\n axis : {None, 0, 1}\n\n Returns\n -------\n DataFrame\n \"\"\"\n # Get the appropriate array-op to apply to each column/block's values.\n array_op = ops.get_array_op(func)\n\n right = lib.item_from_zerodim(right)\n if not is_list_like(right):\n # i.e. 
scalar, faster than checking np.ndim(right) == 0\n bm = self._mgr.apply(array_op, right=right)\n return type(self)(bm)\n\n elif isinstance(right, DataFrame):\n assert self.index.equals(right.index)\n assert self.columns.equals(right.columns)\n # TODO: The previous assertion `assert right._indexed_same(self)`\n # fails in cases with empty columns reached via\n # _frame_arith_method_with_reindex\n\n # TODO operate_blockwise expects a manager of the same type\n bm = self._mgr.operate_blockwise(\n right._mgr, array_op # type: ignore[arg-type]\n )\n return type(self)(bm)\n\n elif isinstance(right, Series) and axis == 1:\n # axis=1 means we want to operate row-by-row\n assert right.index.equals(self.columns)\n\n right = right._values\n # maybe_align_as_frame ensures we do not have an ndarray here\n assert not isinstance(right, np.ndarray)\n\n arrays = [\n array_op(_left, _right)\n for _left, _right in zip(self._iter_column_arrays(), right)\n ]\n\n elif isinstance(right, Series):\n assert right.index.equals(self.index) # Handle other cases later\n right = right._values\n\n arrays = [array_op(left, right) for left in self._iter_column_arrays()]\n\n else:\n # Remaining cases have less-obvious dispatch rules\n raise NotImplementedError(right)\n\n return type(self)._from_arrays(\n arrays, self.columns, self.index, verify_integrity=False\n )\n\n def _combine_frame(self, other: DataFrame, func, fill_value=None):\n # at this point we have `self._indexed_same(other)`\n\n if fill_value is None:\n # since _arith_op may be called in a loop, avoid function call\n # overhead if possible by doing this check once\n _arith_op = func\n\n else:\n\n def _arith_op(left, right):\n # for the mixed_type case where we iterate over columns,\n # _arith_op(left, right) is equivalent to\n # left._binop(right, func, fill_value=fill_value)\n left, right = ops.fill_binop(left, right, fill_value)\n return func(left, right)\n\n new_data = self._dispatch_frame_op(other, _arith_op)\n return new_data\n\n def _construct_result(self, result) -> DataFrame:\n \"\"\"\n Wrap the result of an arithmetic, comparison, or logical operation.\n\n Parameters\n ----------\n result : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n out = self._constructor(result, copy=False)\n # Pin columns instead of passing to constructor for compat with\n # non-unique columns case\n out.columns = self.columns\n out.index = self.index\n return out\n\n def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = self // other\n mod = self - div * other\n return div, mod\n\n def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = other // self\n mod = other - div * self\n return div, mod\n\n # ----------------------------------------------------------------------\n # Combination-Related\n\n @doc(\n _shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nDataFrame\n DataFrame that shows the differences stacked side by side.\n\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\nRaises\n------\nValueError\n When the two DataFrames don't have identical labels or shape.\n\nSee Also\n--------\nSeries.compare : Compare with another Series and show differences.\nDataFrame.equals : Test whether two objects contain the same elements.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nCan only compare identically-labeled\n(i.e. 
same shape, identical row and column labels) DataFrames\n\nExamples\n--------\n>>> df = pd.DataFrame(\n... {{\n... \"col1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n... \"col2\": [1.0, 2.0, 3.0, np.nan, 5.0],\n... \"col3\": [1.0, 2.0, 3.0, 4.0, 5.0]\n... }},\n... columns=[\"col1\", \"col2\", \"col3\"],\n... )\n>>> df\n col1 col2 col3\n0 a 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 3.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\n>>> df2 = df.copy()\n>>> df2.loc[0, 'col1'] = 'c'\n>>> df2.loc[2, 'col3'] = 4.0\n>>> df2\n col1 col2 col3\n0 c 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 4.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\nAlign the differences on columns\n\n>>> df.compare(df2)\n col1 col3\n self other self other\n0 a c NaN NaN\n2 NaN NaN 3.0 4.0\n\nStack the differences on rows\n\n>>> df.compare(df2, align_axis=0)\n col1 col3\n0 self a NaN\n other c NaN\n2 self NaN 3.0\n other NaN 4.0\n\nKeep the equal values\n\n>>> df.compare(df2, keep_equal=True)\n col1 col3\n self other self other\n0 a c 1.0 1.0\n2 b b 3.0 4.0\n\nKeep all original rows and columns\n\n>>> df.compare(df2, keep_shape=True)\n col1 col2 col3\n self other self other self other\n0 a c NaN NaN NaN NaN\n1 NaN NaN NaN NaN NaN NaN\n2 NaN NaN NaN NaN 3.0 4.0\n3 NaN NaN NaN NaN NaN NaN\n4 NaN NaN NaN NaN NaN NaN\n\nKeep all original rows and columns and also all original values\n\n>>> df.compare(df2, keep_shape=True, keep_equal=True)\n col1 col2 col3\n self other self other self other\n0 a c 1.0 1.0 1.0 1.0\n1 a a 2.0 2.0 2.0 2.0\n2 b b 3.0 3.0 3.0 4.0\n3 b b NaN NaN 4.0 4.0\n4 a a 5.0 5.0 5.0 5.0\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: DataFrame,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> DataFrame:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(\n self, other: DataFrame, func, fill_value=None, overwrite: bool = True\n ) -> DataFrame:\n \"\"\"\n Perform column-wise combine with another DataFrame.\n\n Combines a DataFrame with `other` DataFrame using `func`\n to element-wise combine columns. The row and column indexes of the\n resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n The DataFrame to merge column-wise.\n func : function\n Function that takes two series as inputs and return a Series or a\n scalar. 
Used to merge the two dataframes column by columns.\n fill_value : scalar value, default None\n The value to fill NaNs with prior to passing any column to the\n merge func.\n overwrite : bool, default True\n If True, columns in `self` that do not exist in `other` will be\n overwritten with NaNs.\n\n Returns\n -------\n DataFrame\n Combination of the provided DataFrames.\n\n See Also\n --------\n DataFrame.combine_first : Combine two DataFrame objects and default to\n non-null values in frame calling the method.\n\n Examples\n --------\n Combine using a simple function that chooses the smaller column.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2\n >>> df1.combine(df2, take_smaller)\n A B\n 0 0 3\n 1 0 3\n\n Example using a true element-wise combine function.\n\n >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, np.minimum)\n A B\n 0 1 2\n 1 0 3\n\n Using `fill_value` fills Nones prior to passing the column to the\n merge function.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 4.0\n\n However, if the same element in both dataframes is None, that None\n is preserved\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 3.0\n\n Example that demonstrates the use of `overwrite` and behavior when\n the axis differ between the dataframes.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])\n >>> df1.combine(df2, take_smaller)\n A B C\n 0 NaN NaN NaN\n 1 NaN 3.0 -10.0\n 2 NaN 3.0 1.0\n\n >>> df1.combine(df2, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 -10.0\n 2 NaN 3.0 1.0\n\n Demonstrating the preference of the passed in dataframe.\n\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])\n >>> df2.combine(df1, take_smaller)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 NaN\n 2 NaN 3.0 NaN\n\n >>> df2.combine(df1, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isna(series)\n other_mask = isna(otherSeries)\n\n # don't overwrite columns unnecessarily\n # DO propagate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n if col not in self.columns:\n # If self DataFrame does not have col in other DataFrame,\n # try to promote series, which is all NaN, as other_dtype.\n new_dtype = other_dtype\n try:\n series = 
series.astype(new_dtype, copy=False)\n except ValueError:\n # e.g. new_dtype is integer types\n pass\n else:\n # if we have different dtypes, possibly promote\n new_dtype = find_common_type([this_dtype, other_dtype])\n if not is_dtype_equal(this_dtype, new_dtype):\n series = series.astype(new_dtype)\n if not is_dtype_equal(other_dtype, new_dtype):\n otherSeries = otherSeries.astype(new_dtype)\n\n arr = func(series, otherSeries)\n arr = maybe_downcast_to_dtype(arr, new_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result, index=new_index, columns=new_columns)\n\n def combine_first(self, other: DataFrame) -> DataFrame:\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.combine : Perform series-wise operation on two DataFrames\n using a given function.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine_first(df2)\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value\n does not exist in `other`\n\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n >>> df1.combine_first(df2)\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n def combiner(x, y):\n mask = extract_array(isna(x))\n\n x_values = extract_array(x, extract_numpy=True)\n y_values = extract_array(y, extract_numpy=True)\n\n # If the column y in other DataFrame is not in first DataFrame,\n # just return y_values.\n if y.name not in self.columns:\n return y_values\n\n return expressions.where(mask, y_values, x_values)\n\n combined = self.combine(other, combiner, overwrite=False)\n\n dtypes = {\n col: find_common_type([self.dtypes[col], other.dtypes[col]])\n for col in self.columns.intersection(other.columns)\n if not is_dtype_equal(combined.dtypes[col], self.dtypes[col])\n }\n\n if dtypes:\n combined = combined.astype(dtypes)\n\n return combined\n\n def update(\n self,\n other,\n join: str = \"left\",\n overwrite: bool = True,\n filter_func=None,\n errors: str = \"ignore\",\n ) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n Should have at least one matching index/column label\n with the original DataFrame. If a Series is passed,\n its name attribute must be set, and that will be\n used as the column name to align with the original DataFrame.\n join : {'left'}, default 'left'\n Only left join is implemented, keeping the index and columns of the\n original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values\n with values from `other`.\n * False: only update values that are NA in\n the original DataFrame.\n\n filter_func : callable(1d-array) -> bool 1d-array, optional\n Can choose to replace values other than NA. 
Return True for values\n that should be updated.\n errors : {'raise', 'ignore'}, default 'ignore'\n If 'raise', will raise a ValueError if the DataFrame and `other`\n both contain non-NA data in the same place.\n\n .. versionchanged:: 0.24.0\n Changed from `raise_conflict=False|True`\n to `errors='ignore'|'raise'`.\n\n Returns\n -------\n None : method directly changes calling object\n\n Raises\n ------\n ValueError\n * When `errors='raise'` and there's overlapping non-NA data.\n * When `errors` is not either `'ignore'` or `'raise'`\n NotImplementedError\n * If `join != 'left'`\n\n See Also\n --------\n dict.update : Similar method for dictionaries.\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, 5, 6],\n ... 'C': [7, 8, 9]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})\n >>> df.update(new_df)\n >>> df\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, its name attribute must be set.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df\n A B\n 0 a d\n 1 b y\n 2 c e\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])\n >>> df.update(new_df)\n >>> df\n A B\n 0 a x\n 1 b d\n 2 c e\n\n If `other` contains NaNs the corresponding values are not updated\n in the original dataframe.\n\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n # TODO: Support other joins\n if join != \"left\": # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n if errors not in [\"ignore\", \"raise\"]:\n raise ValueError(\"The parameter errors must be either 'ignore' or 'raise'\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col]._values\n that = other[col]._values\n if filter_func is not None:\n with np.errstate(all=\"ignore\"):\n mask = ~filter_func(this) | isna(that)\n else:\n if errors == \"raise\":\n mask_this = notna(that)\n mask_that = notna(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isna(that)\n else:\n mask = notna(this)\n\n # don't overwrite columns unnecessarily\n if mask.all():\n continue\n\n self[col] = expressions.where(mask, this, that)\n\n # ----------------------------------------------------------------------\n # Data reshaping\n @Appender(\n \"\"\"\nExamples\n--------\n>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n... 'Parrot', 'Parrot'],\n... 
'Max Speed': [380., 370., 24., 26.]})\n>>> df\n Animal Max Speed\n0 Falcon 380.0\n1 Falcon 370.0\n2 Parrot 24.0\n3 Parrot 26.0\n>>> df.groupby(['Animal']).mean()\n Max Speed\nAnimal\nFalcon 375.0\nParrot 25.0\n\n**Hierarchical Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... ['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n... index=index)\n>>> df\n Max Speed\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\n>>> df.groupby(level=0).mean()\n Max Speed\nAnimal\nFalcon 370.0\nParrot 25.0\n>>> df.groupby(level=\"Type\").mean()\n Max Speed\nType\nCaptive 210.0\nWild 185.0\n\nWe can also choose to include NA in group keys or not by setting\n`dropna` parameter, the default setting is `True`:\n\n>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=[\"b\"]).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\n\n>>> df.groupby(by=[\"b\"], dropna=False).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\nNaN 1 4\n\n>>> l = [[\"a\", 12, 12], [None, 12.3, 33.], [\"b\", 12.3, 123], [\"a\", 1, 1]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=\"a\").sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\n\n>>> df.groupby(by=\"a\", dropna=False).sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\nNaN 12.3 33.0\n\"\"\"\n )\n @Appender(_shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis: Axis = 0,\n level: Optional[Level] = None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> DataFrameGroupBy:\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return DataFrameGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n _shared_docs[\n \"pivot\"\n ] = \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation, multiple values will result in a MultiIndex in the\n columns. See the :ref:`User Guide <reshaping>` for more on reshaping.\n\n Parameters\n ----------%s\n index : str or object or a list of str, optional\n Column to use to make new frame's index. If None, uses\n existing index.\n\n .. versionchanged:: 1.1.0\n Also accept list of index names.\n\n columns : str or object or a list of str\n Column to use to make new frame's columns.\n\n .. versionchanged:: 1.1.0\n Also accept list of columns names.\n\n values : str, object or a list of the previous, optional\n Column(s) to use for populating new frame's values. 
If not\n specified, all remaining columns will be used and the result will\n have hierarchically indexed columns.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n Raises\n ------\n ValueError:\n When there are any `index`, `columns` combinations with multiple\n values. `DataFrame.pivot_table` when you need to aggregate.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods.\n\n Examples\n --------\n >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar')['baz']\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n baz zoo\n bar A B C A B C\n foo\n one 1 2 3 x y z\n two 4 5 6 q w t\n\n You could also assign a list of column names or a list of index names.\n\n >>> df = pd.DataFrame({\n ... \"lev1\": [1, 1, 1, 2, 2, 2],\n ... \"lev2\": [1, 1, 2, 1, 1, 2],\n ... \"lev3\": [1, 2, 1, 2, 1, 2],\n ... \"lev4\": [1, 2, 3, 4, 5, 6],\n ... \"values\": [0, 1, 2, 3, 4, 5]})\n >>> df\n lev1 lev2 lev3 lev4 values\n 0 1 1 1 1 0\n 1 1 1 2 2 1\n 2 1 2 1 3 2\n 3 2 1 2 4 3\n 4 2 1 1 5 4\n 5 2 2 2 6 5\n\n >>> df.pivot(index=\"lev1\", columns=[\"lev2\", \"lev3\"],values=\"values\")\n lev2 1 2\n lev3 1 2 1 2\n lev1\n 1 0.0 1.0 2.0 NaN\n 2 4.0 3.0 NaN 5.0\n\n >>> df.pivot(index=[\"lev1\", \"lev2\"], columns=[\"lev3\"],values=\"values\")\n lev3 1 2\n lev1 lev2\n 1 1 0.0 1.0\n 2 2.0 NaN\n 2 1 4.0 3.0\n 2 NaN 5.0\n\n A ValueError is raised if there are any duplicates.\n\n >>> df = pd.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]})\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n Notice that the first two rows are the same for our `index`\n and `columns` arguments.\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n Traceback (most recent call last):\n ...\n ValueError: Index contains duplicate entries, cannot reshape\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot\"])\n def pivot(self, index=None, columns=None, values=None) -> DataFrame:\n from pandas.core.reshape.pivot import pivot\n\n return pivot(self, index=index, columns=columns, values=values)\n\n _shared_docs[\n \"pivot_table\"\n ] = \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame.\n\n The levels in the pivot table will be stored in MultiIndex objects\n (hierarchical indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------%s\n values : column to aggregate, optional\n index : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table index. 
If an array is passed,\n it is being used as the same manner as column values.\n columns : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table column. If an array is passed,\n it is being used as the same manner as column values.\n aggfunc : function, list of functions, dict, default numpy.mean\n If list of functions passed, the resulting pivot table will have\n hierarchical columns whose top level are the function names\n (inferred from the function objects themselves)\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with (in the resulting pivot table,\n after aggregation).\n margins : bool, default False\n Add all row / columns (e.g. for subtotal / grand totals).\n dropna : bool, default True\n Do not include columns whose entries are all NaN.\n margins_name : str, default 'All'\n Name of the row / column that will contain the totals\n when margins is True.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 0.25.0\n\n Returns\n -------\n DataFrame\n An Excel style pivot table.\n\n See Also\n --------\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.melt: Unpivot a DataFrame from wide to long format,\n optionally leaving identifiers set.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum)\n >>> table\n C large small\n A B\n bar one 4.0 5.0\n two 7.0 6.0\n foo one 4.0 1.0\n two NaN 6.0\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum, fill_value=0)\n >>> table\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n The next example aggregates by taking the mean across multiple columns.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 'E': np.mean})\n >>> table\n D E\n A C\n bar large 5.500000 7.500000\n small 5.500000 8.500000\n foo large 2.000000 4.500000\n small 2.333333 4.333333\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 
'E': [min, max, np.mean]})\n >>> table\n D E\n mean max mean min\n A C\n bar large 5.500000 9.0 7.500000 6.0\n small 5.500000 9.0 8.500000 8.0\n foo large 2.000000 5.0 4.500000 4.0\n small 2.333333 6.0 4.333333 2.0\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot_table\"])\n def pivot_table(\n self,\n values=None,\n index=None,\n columns=None,\n aggfunc=\"mean\",\n fill_value=None,\n margins=False,\n dropna=True,\n margins_name=\"All\",\n observed=False,\n ) -> DataFrame:\n from pandas.core.reshape.pivot import pivot_table\n\n return pivot_table(\n self,\n values=values,\n index=index,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n )\n\n def stack(self, level: Level = -1, dropna: bool = True):\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n Parameters\n ----------\n level : int, str, list, default -1\n Level(s) to stack from the column axis onto the index\n axis, defined as one index or label, or a list of indices\n or labels.\n dropna : bool, default True\n Whether to drop rows in the resulting Frame/Series with\n missing values. Stacking a column level onto the index\n axis can create combinations of index and column values\n that are missing from the original dataframe. See Examples\n section.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack()\n cat weight 0\n height 1\n dog weight 2\n height 3\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... 
columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack()\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n\n **Prescribing the level(s) to be stacked**\n\n The first parameter controls which level or levels are stacked:\n\n >>> df_multi_level_cols2.stack(0)\n kg m\n cat height NaN 2.0\n weight 1.0 NaN\n dog height NaN 4.0\n weight 3.0 NaN\n >>> df_multi_level_cols2.stack([0, 1])\n cat height m 2.0\n weight kg 1.0\n dog height m 4.0\n weight kg 3.0\n dtype: float64\n\n **Dropping missing values**\n\n >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n Note that rows where all values are missing are dropped by\n default but this behaviour can be controlled via the dropna\n keyword parameter:\n\n >>> df_multi_level_cols3\n weight height\n kg m\n cat NaN 1.0\n dog 2.0 3.0\n >>> df_multi_level_cols3.stack(dropna=False)\n height weight\n cat kg NaN NaN\n m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n >>> df_multi_level_cols3.stack(dropna=True)\n height weight\n cat m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n \"\"\"\n from pandas.core.reshape.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n result = stack_multiple(self, level, dropna=dropna)\n else:\n result = stack(self, level, dropna=dropna)\n\n return result.__finalize__(self, method=\"stack\")\n\n def explode(\n self, column: Union[str, Tuple], ignore_index: bool = False\n ) -> DataFrame:\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n Raises\n ------\n ValueError :\n if columns of the frame are not unique.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n Series.explode : Explode a DataFrame from list-like columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
In addition, the ordering of rows in the\n output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 foo 1\n 2 [] 1\n 3 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1 1\n 0 2 1\n 0 3 1\n 1 foo 1\n 2 NaN 1\n 3 3 1\n 3 4 1\n \"\"\"\n if not (is_scalar(column) or isinstance(column, tuple)):\n raise ValueError(\"column must be a scalar\")\n if not self.columns.is_unique:\n raise ValueError(\"columns must be unique\")\n\n df = self.reset_index(drop=True)\n result = df[column].explode()\n result = df.drop([column], axis=1).join(result)\n if ignore_index:\n result.index = ibase.default_index(len(result))\n else:\n result.index = self.index.take(result.index)\n result = result.reindex(columns=self.columns, copy=False)\n\n return result\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series\n (the analogue of stack when the columns are not a MultiIndex).\n\n Parameters\n ----------\n level : int, str, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name.\n fill_value : int, str or dict\n Replace NaN with this value if the unstack produces missing values.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1.0 2.0\n two 3.0 4.0\n\n >>> s.unstack(level=0)\n one two\n a 1.0 3.0\n b 2.0 4.0\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n result = unstack(self, level, fill_value)\n\n return result.__finalize__(self, method=\"unstack\")\n\n @Appender(_shared_docs[\"melt\"] % {\"caller\": \"df.melt(\", \"other\": \"melt\"})\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level: Optional[Level] = None,\n ignore_index=True,\n ) -> DataFrame:\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n ignore_index=ignore_index,\n )\n\n # ----------------------------------------------------------------------\n # Time series-related\n\n @doc(\n Series.diff,\n klass=\"Dataframe\",\n extra_params=\"axis : {0 or 'index', 1 or 'columns'}, default 0\\n \"\n \"Take difference over rows (0) or columns (1).\\n\",\n other_klass=\"Series\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 
'c': [1, 4, 9, 16, 25, 36]})\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(axis=1)\n a b c\n 0 NaN 0 0\n 1 NaN -1 3\n 2 NaN -1 7\n 3 NaN -1 13\n 4 NaN 0 20\n 5 NaN 2 28\n\n Difference with 3rd previous row\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n\n Overflow in input dtype\n\n >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)\n >>> df.diff()\n a\n 0 NaN\n 1 255.0\"\"\"\n ),\n )\n def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:\n if not isinstance(periods, int):\n if not (is_float(periods) and periods.is_integer()):\n raise ValueError(\"periods must be an integer\")\n periods = int(periods)\n\n bm_axis = self._get_block_manager_axis(axis)\n\n if bm_axis == 0 and periods != 0:\n return self - self.shift(periods, axis=axis)\n\n new_data = self._mgr.diff(n=periods, axis=bm_axis)\n return self._constructor(new_data).__finalize__(self, \"diff\")\n\n # ----------------------------------------------------------------------\n # Function application\n\n def _gotitem(\n self,\n key: IndexLabel,\n ndim: int,\n subset: Optional[FrameOrSeriesUnion] = None,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n if subset is None:\n subset = self\n elif subset.ndim == 1: # is Series\n return subset\n\n # TODO: _shallow_copy(subset)?\n return subset[key]\n\n _agg_summary_and_see_also_doc = dedent(\n \"\"\"\n The aggregation operations are always performed over an axis, either the\n index (default) or the column axis. This behavior is different from\n `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,\n `var`), where the default is to compute the aggregation of the flattened\n array, e.g., ``numpy.mean(arr_2d)`` as opposed to\n ``numpy.mean(arr_2d, axis=0)``.\n\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Perform any type of operations.\n DataFrame.transform : Perform transformation type operations.\n core.groupby.GroupBy : Perform operations over groups.\n core.resample.Resampler : Perform operations over resampled bins.\n core.window.Rolling : Perform operations over rolling window.\n core.window.Expanding : Perform operations over expanding window.\n core.window.ExponentialMovingWindow : Perform operation over exponential weighted\n window.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])\n A B C\n sum 12.0 15.0 18.0\n min 1.0 2.0 3.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})\n A B\n sum 12.0 NaN\n min 1.0 2.0\n max NaN 8.0\n\n Aggregate different functions over the columns and rename the index of the resulting\n DataFrame.\n\n >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\n A B C\n x 7.0 NaN NaN\n y NaN 2.0 NaN\n z NaN NaN 6.0\n\n Aggregate over the columns.\n\n >>> df.agg(\"mean\", axis=\"columns\")\n 0 2.0\n 1 5.0\n 2 8.0\n 3 NaN\n dtype: float64\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_summary_and_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):\n axis = self._get_axis_number(axis)\n\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n\n result = None\n try:\n result, how = self._aggregate(func, axis, *args, **kwargs)\n except TypeError as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n if result is None:\n return self.apply(func, axis=axis, args=args, **kwargs)\n\n if relabeling:\n # This is to keep the order to columns occurrence unchanged, and also\n # keep the order of new columns occurrence unchanged\n\n # For the return values of reconstruct_func, if relabeling is\n # False, columns and order will be None.\n assert columns is not None\n assert order is not None\n\n result_in_dict = relabel_result(result, func, columns, order)\n result = DataFrame(result_in_dict, index=columns)\n\n return result\n\n def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):\n from pandas.core.apply import frame_apply\n\n op = frame_apply(\n self if axis == 0 else self.T,\n func=arg,\n axis=0,\n args=args,\n kwds=kwargs,\n )\n result, how = op.agg()\n\n if axis == 1:\n # NDFrame.aggregate returns a tuple, and we need to transpose\n # only result\n result = result.T if result is not None else result\n\n return result, how\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> DataFrame:\n result = transform(self, func, axis, *args, **kwargs)\n assert isinstance(result, DataFrame)\n return result\n\n def apply(\n self,\n func: AggFuncType,\n axis: Axis = 0,\n raw: bool = False,\n result_type=None,\n args=(),\n **kwds,\n ):\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``). By default (``result_type=None``), the final return type\n is inferred from the return type of the applied function. 
Otherwise,\n it depends on the `result_type` argument.\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n\n raw : bool, default False\n Determines if row or column is passed as a Series or ndarray object:\n\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray objects\n instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n result_type : {'expand', 'reduce', 'broadcast', None}, default None\n These only act when ``axis=1`` (columns):\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : returns a Series if possible rather than expanding\n list-like results. This is the opposite of 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the DataFrame, the original index and columns will be\n retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap: For elementwise operations.\n DataFrame.aggregate: Only perform aggregating type operations.\n DataFrame.transform: Only perform transforming type operations.\n\n Examples\n --------\n >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> df.apply(np.sqrt)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n Using a reducing function on either axis\n\n >>> df.apply(np.sum, axis=0)\n A 12\n B 27\n dtype: int64\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n Passing ``result_type='expand'`` will expand list-like results\n to columns of a Dataframe\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')\n 0 1\n 0 1 2\n 1 1 2\n 2 1 2\n\n Returning a Series inside the function is similar to passing\n ``result_type='expand'``. The resulting column names\n will be the Series index.\n\n >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)\n foo bar\n 0 1 2\n 1 1 2\n 2 1 2\n\n Passing ``result_type='broadcast'`` will ensure the same shape\n result, whether list-like or scalar is returned by the function,\n and broadcast it along the axis. 
The resulting column names will\n be the originals.\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')\n A B\n 0 1 2\n 1 1 2\n 2 1 2\n \"\"\"\n from pandas.core.apply import frame_apply\n\n op = frame_apply(\n self,\n func=func,\n axis=axis,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n return op.apply()\n\n def applymap(\n self, func: PythonFuncType, na_action: Optional[str] = None\n ) -> DataFrame:\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n na_action : {None, 'ignore'}, default None\n If ‘ignore’, propagate NaN values, without passing them to func.\n\n .. versionadded:: 1.2\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> df.applymap(lambda x: len(str(x)))\n 0 1\n 0 3 4\n 1 5 5\n\n Like Series.map, NA values can be ignored:\n\n >>> df_copy = df.copy()\n >>> df_copy.iloc[0, 0] = pd.NA\n >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\n 0 1\n 0 <NA> 4\n 1 5 5\n\n Note that a vectorized version of `func` often exists, which will\n be much faster. You could square each number elementwise.\n\n >>> df.applymap(lambda x: x**2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n But it's better to avoid applymap in that case.\n\n >>> df ** 2\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n if na_action not in {\"ignore\", None}:\n raise ValueError(\n f\"na_action must be 'ignore' or None. Got {repr(na_action)}\"\n )\n ignore_na = na_action == \"ignore\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if x.empty:\n return lib.map_infer(x, func, ignore_na=ignore_na)\n return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)\n\n return self.apply(infer).__finalize__(self, \"applymap\")\n\n # ----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(\n self,\n other,\n ignore_index: bool = False,\n verify_integrity: bool = False,\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Append rows of `other` to the end of caller, returning a new object.\n\n Columns in `other` that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise ValueError on creating index with duplicates.\n sort : bool, default False\n Sort columns if the columns of `self` and `other` are not aligned.\n\n .. versionchanged:: 1.0.0\n\n Changed to not sort by default.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in\n the DataFrame's index, the order of the columns in the resulting\n DataFrame will be unchanged.\n\n Iteratively appending rows to a DataFrame can be more computationally\n intensive than a single concatenate. 
A better solution is to append\n those rows to a list and then concatenate the list with the original\n DataFrame all at once.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n The following, while not recommended methods for generating DataFrames,\n show two ways to generate a DataFrame from multiple data sources.\n\n Less efficient:\n\n >>> df = pd.DataFrame(columns=['A'])\n >>> for i in range(5):\n ... df = df.append({'A': i}, ignore_index=True)\n >>> df\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n More efficient:\n\n >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n ... ignore_index=True)\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n if not ignore_index:\n raise TypeError(\"Can only append a dict if ignore_index=True\")\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError(\n \"Can only append a Series if ignore_index=True \"\n \"or if the Series has a name\"\n )\n\n index = Index([other.name], name=self.index.name)\n idx_diff = other.index.difference(self.columns)\n try:\n combined_columns = self.columns.append(idx_diff)\n except TypeError:\n combined_columns = self.columns.astype(object).append(idx_diff)\n other = (\n other.reindex(combined_columns, copy=False)\n .to_frame()\n .T.infer_objects()\n .rename_axis(index.names, copy=False)\n )\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list):\n if not other:\n pass\n elif not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.reindex(columns=self.columns)\n\n from pandas.core.reshape.concat import concat\n\n if isinstance(other, (list, tuple)):\n to_concat = [self, *other]\n else:\n to_concat = [self, other]\n return (\n concat(\n to_concat,\n ignore_index=ignore_index,\n verify_integrity=verify_integrity,\n sort=sort,\n )\n ).__finalize__(self, method=\"append\")\n\n def join(\n self,\n other: FrameOrSeriesUnion,\n on: Optional[IndexLabel] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `other` DataFrame either on index or on a key\n column. Efficiently join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame.\n on : str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index\n in `other`, otherwise joins index-on-index. If multiple\n values given, the `other` DataFrame must have a MultiIndex. Can\n pass an array as the join key if it is not already contained in\n the calling DataFrame. 
Like an Excel VLOOKUP operation.\n how : {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use calling frame's index (or column if on is specified)\n * right: use `other`'s index.\n * outer: form union of calling frame's index (or column if on is\n specified) with `other`'s index, and sort it.\n lexicographically.\n * inner: form intersection of calling frame's index (or column if\n on is specified) with `other`'s index, preserving the order\n of the calling's one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from right frame's overlapping columns.\n sort : bool, default False\n Order result DataFrame lexicographically by the join key. If False,\n the order of the join key depends on the join type (how keyword).\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the caller and `other`.\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Notes\n -----\n Parameters `on`, `lsuffix`, and `rsuffix` are not supported when\n passing a list of `DataFrame` objects.\n\n Support for specifying index levels as the `on` parameter was added\n in version 0.23.0.\n\n Examples\n --------\n >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n\n >>> df\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n 4 K4 A4\n 5 K5 A5\n\n >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']})\n\n >>> other\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> df.join(other, lsuffix='_caller', rsuffix='_other')\n key_caller A key_other B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 NaN NaN\n 4 K4 A4 NaN NaN\n 5 K5 A5 NaN NaN\n\n If we want to join using the key columns, we need to set key to be\n the index in both `df` and `other`. The joined DataFrame will have\n key as its index.\n\n >>> df.set_index('key').join(other.set_index('key'))\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 NaN\n K4 A4 NaN\n K5 A5 NaN\n\n Another option to join using the key columns is to use the `on`\n parameter. DataFrame.join always uses `other`'s index but we can use\n any column in `df`. 
This method preserves the original DataFrame's\n index in the result.\n\n >>> df.join(other.set_index('key'), on='key')\n key A B\n 0 K0 A0 B0\n 1 K1 A1 B1\n 2 K2 A2 B2\n 3 K3 A3 NaN\n 4 K4 A4 NaN\n 5 K5 A5 NaN\n \"\"\"\n return self._join_compat(\n other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort\n )\n\n def _join_compat(\n self,\n other: FrameOrSeriesUnion,\n on: Optional[IndexLabel] = None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n sort: bool = False,\n ):\n from pandas.core.reshape.concat import concat\n from pandas.core.reshape.merge import merge\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError(\"Other Series must have a name\")\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n if how == \"cross\":\n return merge(\n self,\n other,\n how=how,\n on=on,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n return merge(\n self,\n other,\n left_on=on,\n how=how,\n left_index=on is None,\n right_index=True,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n else:\n if on is not None:\n raise ValueError(\n \"Joining multiple DataFrames only supported for joining on index\"\n )\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n # join indexes only using concat\n if can_concat:\n if how == \"left\":\n res = concat(\n frames, axis=1, join=\"outer\", verify_integrity=True, sort=sort\n )\n return res.reindex(self.index, copy=False)\n else:\n return concat(\n frames, axis=1, join=how, verify_integrity=True, sort=sort\n )\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(\n joined, frame, how=how, left_index=True, right_index=True\n )\n\n return joined\n\n @Substitution(\"\")\n @Appender(_merge_doc, indents=2)\n def merge(\n self,\n right: FrameOrSeriesUnion,\n how: str = \"inner\",\n on: Optional[IndexLabel] = None,\n left_on: Optional[IndexLabel] = None,\n right_on: Optional[IndexLabel] = None,\n left_index: bool = False,\n right_index: bool = False,\n sort: bool = False,\n suffixes: Suffixes = (\"_x\", \"_y\"),\n copy: bool = True,\n indicator: bool = False,\n validate: Optional[str] = None,\n ) -> DataFrame:\n from pandas.core.reshape.merge import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n sort=sort,\n suffixes=suffixes,\n copy=copy,\n indicator=indicator,\n validate=validate,\n )\n\n def round(self, decimals=0, *args, **kwargs) -> DataFrame:\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. 
Elements\n of `decimals` which are not columns of the input will be\n ignored.\n *args\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n\n Returns\n -------\n DataFrame\n A DataFrame with the affected columns rounded to the specified\n number of decimal places.\n\n See Also\n --------\n numpy.around : Round a numpy array to the given number of decimals.\n Series.round : Round a Series to the given number of decimals.\n\n Examples\n --------\n >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],\n ... columns=['dogs', 'cats'])\n >>> df\n dogs cats\n 0 0.21 0.32\n 1 0.01 0.67\n 2 0.66 0.03\n 3 0.21 0.18\n\n By providing an integer each column is rounded to the same number\n of decimal places\n\n >>> df.round(1)\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.7\n 2 0.7 0.0\n 3 0.2 0.2\n\n With a dict, the number of places for specific columns can be\n specified with the column names as key and the number of decimal\n places as value\n\n >>> df.round({'dogs': 1, 'cats': 0})\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n\n Using a Series, the number of places for specific columns can be\n specified with the column names as index and the number of\n decimal places as value\n\n >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])\n >>> df.round(decimals)\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n def _dict_round(df, decimals):\n for col, vals in df.items():\n try:\n yield _series_round(vals, decimals[col])\n except KeyError:\n yield vals\n\n def _series_round(s, decimals):\n if is_integer_dtype(s) or is_float_dtype(s):\n return s.round(decimals)\n return s\n\n nv.validate_round(args, kwargs)\n\n if isinstance(decimals, (dict, Series)):\n if isinstance(decimals, Series):\n if not decimals.index.is_unique:\n raise ValueError(\"Index of decimals must be unique\")\n new_cols = list(_dict_round(self, decimals))\n elif is_integer(decimals):\n # Dispatch to Series.round\n new_cols = [_series_round(v, decimals) for _, v in self.items()]\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n if len(new_cols) > 0:\n return self._constructor(\n concat(new_cols, axis=1), index=self.index, columns=self.columns\n )\n else:\n return self\n\n # ----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method=\"pearson\", min_periods=1) -> DataFrame:\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float. Note that the returned matrix from corr\n will have 1 along the diagonals and will be symmetric\n regardless of the callable's behavior.\n\n .. versionadded:: 0.24.0\n\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. 
Currently only available for Pearson\n and Spearman correlation.\n\n Returns\n -------\n DataFrame\n Correlation matrix.\n\n See Also\n --------\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n Series.corr : Compute the correlation between two Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr(method=histogram_intersection)\n dogs cats\n dogs 1.0 0.3\n cats 0.3 1.0\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if method == \"pearson\":\n correl = libalgos.nancorr(mat, minp=min_periods)\n elif method == \"spearman\":\n correl = libalgos.nancorr_spearman(mat, minp=min_periods)\n elif method == \"kendall\" or callable(method):\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n if i > j:\n continue\n\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = np.nan\n elif i == j:\n c = 1.0\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n else:\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n return self._constructor(correl, index=idx, columns=cols)\n\n def cov(\n self, min_periods: Optional[int] = None, ddof: Optional[int] = 1\n ) -> DataFrame:\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.\n core.window.Expanding.cov : Expanding sample covariance.\n core.window.Rolling.cov : Rolling sample covariance.\n\n Notes\n -----\n Returns the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-ddof.\n\n For DataFrames that have Series that are missing data (assuming that\n data is `missing at random\n <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)\n the returned covariance matrix will be an unbiased estimate\n of the variance and covariance between the member Series.\n\n However, for many applications this estimate may not be acceptable\n because the estimate covariance matrix is not guaranteed to be positive\n semi-definite. This could lead to estimate correlations having\n absolute values which are greater than one, and/or a non-invertible\n covariance matrix. See `Estimation of covariance matrices\n <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_\n matrices>`__ for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> df.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if notna(mat).all():\n if min_periods is not None and min_periods > len(mat):\n base_cov = np.empty((mat.shape[1], mat.shape[1]))\n base_cov.fill(np.nan)\n else:\n base_cov = np.cov(mat.T, ddof=ddof)\n base_cov = base_cov.reshape((len(cols), len(cols)))\n else:\n base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)\n\n return self._constructor(base_cov, index=idx, columns=cols)\n\n def corrwith(self, other, axis: Axis = 0, drop=False, method=\"pearson\") -> Series:\n \"\"\"\n Compute pairwise correlation.\n\n Pairwise correlation is computed between rows or columns of\n DataFrame with rows or columns of Series or DataFrame. DataFrames\n are first aligned along both axes before computing the\n correlations.\n\n Parameters\n ----------\n other : DataFrame, Series\n Object with which to compute correlations.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
0 or 'index' to compute column-wise, 1 or 'columns' for\n row-wise.\n drop : bool, default False\n Drop missing indices from result.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Pairwise correlations.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation of columns.\n \"\"\"\n axis = self._get_axis_number(axis)\n this = self._get_numeric_data()\n\n if isinstance(other, Series):\n return this.apply(lambda x: other.corr(x, method=method), axis=axis)\n\n other = other._get_numeric_data()\n left, right = this.align(other, join=\"inner\", copy=False)\n\n if axis == 1:\n left = left.T\n right = right.T\n\n if method == \"pearson\":\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n elif method in [\"kendall\", \"spearman\"] or callable(method):\n\n def c(x):\n return nanops.nancorr(x[0], x[1], method=method)\n\n correl = self._constructor_sliced(\n map(c, zip(left.values.T, right.values.T)), index=left.columns\n )\n\n else:\n raise ValueError(\n f\"Invalid method {method} was passed, \"\n \"valid methods are: 'pearson', 'kendall', \"\n \"'spearman', or callable\"\n )\n\n if not drop:\n # Find non-matching labels along the given axis\n # and append missing correlations (GH 22375)\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n idx_diff = result_index.difference(correl.index)\n\n if len(idx_diff) > 0:\n correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))\n\n return correl\n\n # ----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(\n self, axis: Axis = 0, level: Optional[Level] = None, numeric_only: bool = False\n ):\n \"\"\"\n Count non-NA cells for each column or row.\n\n The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending\n on `pandas.options.mode.use_inf_as_na`) are considered NA.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index' counts are generated for each column.\n If 1 or 'columns' counts are generated for each row.\n level : int or str, optional\n If the axis is a `MultiIndex` (hierarchical), count along a\n particular `level`, collapsing into a `DataFrame`.\n A `str` specifies the level name.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n Returns\n -------\n Series or DataFrame\n For each column/row the number of non-NA/null entries.\n If `level` is specified returns a `DataFrame`.\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.value_counts: Count unique combinations of columns.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = pd.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... 
\"Single\": [False, True, True, True, False]})\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n Counts for each **row**:\n\n >>> df.count(axis='columns')\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n Counts for one level of a `MultiIndex`:\n\n >>> df.set_index([\"Person\", \"Single\"]).count(level=\"Person\")\n Age\n Person\n John 2\n Lewis 1\n Myla 1\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis, numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type or frame._mgr.any_extension_types:\n # the or any_extension_types is really only hit for single-\n # column frames with an extension array\n result = notna(frame).sum(axis=axis)\n else:\n # GH13407\n series_counts = notna(frame).sum(axis=axis)\n counts = series_counts.values\n result = self._constructor_sliced(\n counts, index=frame._get_agg_axis(axis)\n )\n\n return result.astype(\"int64\")\n\n def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\n f\"Can only count levels on hierarchical {self._get_axis_name(axis)}.\"\n )\n\n # Mask NaNs: Mask rows or columns where the index level is NaN, and all\n # values in the DataFrame that are NaN\n if frame._is_mixed_type:\n # Since we have mixed types, calling notna(frame.values) might\n # upcast everything to object\n values_mask = notna(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n values_mask = notna(frame.values)\n\n index_mask = notna(count_axis.get_level_values(level=level))\n if axis == 1:\n mask = index_mask & values_mask\n else:\n mask = index_mask.reshape(-1, 1) & values_mask\n\n if isinstance(level, str):\n level = count_axis._get_level_number(level)\n\n level_name = count_axis._names[level]\n level_index = count_axis.levels[level]._shallow_copy(name=level_name)\n level_codes = ensure_int64(count_axis.codes[level])\n counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)\n\n if axis == 1:\n result = self._constructor(counts, index=agg_axis, columns=level_index)\n else:\n result = self._constructor(counts, index=level_index, columns=agg_axis)\n\n return result\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis: Axis = 0,\n skipna: bool = True,\n numeric_only: Optional[bool] = None,\n filter_type=None,\n **kwds,\n ):\n\n assert filter_type is None or filter_type == \"bool\", filter_type\n out_dtype = \"bool\" if filter_type == \"bool\" else None\n\n own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]\n\n dtype_is_dt = np.array(\n [is_datetime64_any_dtype(dtype) for dtype in own_dtypes],\n dtype=bool,\n )\n if numeric_only is None and name in [\"mean\", \"median\"] and dtype_is_dt.any():\n warnings.warn(\n \"DataFrame.mean and DataFrame.median with numeric_only=None \"\n \"will include datetime64 and datetime64tz columns in a \"\n \"future version.\",\n FutureWarning,\n stacklevel=5,\n )\n cols = self.columns[~dtype_is_dt]\n self = 
self[cols]\n\n # TODO: Make other agg func handle axis=None properly GH#21597\n axis = self._get_axis_number(axis)\n labels = self._get_agg_axis(axis)\n assert axis in [0, 1]\n\n def func(values: np.ndarray):\n # We only use this in the case that operates on self.values\n return op(values, axis=axis, skipna=skipna, **kwds)\n\n def blk_func(values, axis=1):\n if isinstance(values, ExtensionArray):\n return values._reduce(name, skipna=skipna, **kwds)\n else:\n return op(values, axis=axis, skipna=skipna, **kwds)\n\n def _get_data() -> DataFrame:\n if filter_type is None:\n data = self._get_numeric_data()\n else:\n # GH#25101, GH#24434\n assert filter_type == \"bool\"\n data = self._get_bool_data()\n return data\n\n if numeric_only is not None or axis == 0:\n # For numeric_only non-None and axis non-None, we know\n # which blocks to use and no try/except is needed.\n # For numeric_only=None only the case with axis==0 and no object\n # dtypes are unambiguous can be handled with BlockManager.reduce\n # Case with EAs see GH#35881\n df = self\n if numeric_only is True:\n df = _get_data()\n if axis == 1:\n df = df.T\n axis = 0\n\n ignore_failures = numeric_only is None\n\n # After possibly _get_data and transposing, we are now in the\n # simple case where we can use BlockManager.reduce\n res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)\n out = df._constructor(res).iloc[0]\n if out_dtype is not None:\n out = out.astype(out_dtype)\n if axis == 0 and len(self) == 0 and name in [\"sum\", \"prod\"]:\n # Even if we are object dtype, follow numpy and return\n # float64, see test_apply_funcs_over_empty\n out = out.astype(np.float64)\n return out\n\n assert numeric_only is None\n\n data = self\n values = data.values\n\n try:\n result = func(values)\n\n except TypeError:\n # e.g. in nanops trying to convert strs to float\n\n data = _get_data()\n labels = data._get_agg_axis(axis)\n\n values = data.values\n with np.errstate(all=\"ignore\"):\n result = func(values)\n\n if filter_type == \"bool\" and notna(result).all():\n result = result.astype(np.bool_)\n elif filter_type is None and is_object_dtype(result.dtype):\n try:\n result = result.astype(np.float64)\n except (ValueError, TypeError):\n # try to coerce to the original dtypes item by item if we can\n pass\n\n result = self._constructor_sliced(result, index=labels)\n return result\n\n def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:\n \"\"\"\n Count distinct observations over requested axis.\n\n Return Series with number of distinct observations. Can ignore NaN\n values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for\n column-wise.\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.nunique: Method nunique for Series.\n DataFrame.count: Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(axis=1)\n 0 1\n 1 2\n 2 2\n dtype: int64\n \"\"\"\n return self.apply(Series.nunique, axis=axis, dropna=dropna)\n\n def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of minima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmin : Return index of the minimum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the minimum value in each column.\n\n >>> df.idxmin()\n consumption Pork\n co2_emissions Wheat Products\n dtype: object\n\n To return the index for the minimum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmin(axis=\"columns\")\n Pork consumption\n Wheat Products co2_emissions\n Beef consumption\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmin, \"argmin\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of maxima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmax : Return index of the maximum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... 
index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the maximum value in each column.\n\n >>> df.idxmax()\n consumption Wheat Products\n co2_emissions Beef\n dtype: object\n\n To return the index for the maximum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmax(axis=\"columns\")\n Pork co2_emissions\n Wheat Products consumption\n Beef co2_emissions\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmax, \"argmax\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num: int) -> Index:\n \"\"\"\n Let's be explicit about this.\n \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError(f\"Axis must be 0 or 1 (got {repr(axis_num)})\")\n\n def mode(\n self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True\n ) -> DataFrame:\n \"\"\"\n Get the mode(s) of each element along the selected axis.\n\n The mode of a set of values is the value that appears most often.\n It can be multiple values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to iterate over while searching for the mode:\n\n * 0 or 'index' : get mode of each column\n * 1 or 'columns' : get mode of each row.\n\n numeric_only : bool, default False\n If True, only apply to numeric columns.\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The modes of each column or row.\n\n See Also\n --------\n Series.mode : Return the highest frequency value in a Series.\n Series.value_counts : Return the counts of values in a Series.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 2, 2),\n ... ('mammal', 4, np.nan),\n ... ('arthropod', 8, 0),\n ... ('bird', 2, np.nan)],\n ... index=('falcon', 'horse', 'spider', 'ostrich'),\n ... columns=('species', 'legs', 'wings'))\n >>> df\n species legs wings\n falcon bird 2 2.0\n horse mammal 4 NaN\n spider arthropod 8 0.0\n ostrich bird 2 NaN\n\n By default, missing values are not considered, and the mode of wings\n are both 0 and 2. 
Because the resulting DataFrame has two rows,\n the second row of ``species`` and ``legs`` contains ``NaN``.\n\n >>> df.mode()\n species legs wings\n 0 bird 2.0 0.0\n 1 NaN NaN 2.0\n\n Setting ``dropna=False`` ``NaN`` values are considered and they can be\n the mode (like for wings).\n\n >>> df.mode(dropna=False)\n species legs wings\n 0 bird 2 NaN\n\n Setting ``numeric_only=True``, only the mode of numeric columns is\n computed, and columns of other types are ignored.\n\n >>> df.mode(numeric_only=True)\n legs wings\n 0 2.0 0.0\n 1 NaN 2.0\n\n To compute the mode over columns and not rows, use the axis parameter:\n\n >>> df.mode(axis='columns', numeric_only=True)\n 0 1\n falcon 2.0 NaN\n horse 4.0 NaN\n spider 0.0 8.0\n ostrich 2.0 NaN\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n\n def f(s):\n return s.mode(dropna=dropna)\n\n data = data.apply(f, axis=axis)\n # Ensure index is type stable (should always use int index)\n if data.empty:\n data.index = ibase.default_index(0)\n\n return data\n\n def quantile(\n self,\n q=0.5,\n axis: Axis = 0,\n numeric_only: bool = True,\n interpolation: str = \"linear\",\n ):\n \"\"\"\n Return values at the given quantile over requested axis.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n Value between 0 <= q <= 1, the quantile(s) to compute.\n axis : {0, 1, 'index', 'columns'}, default 0\n Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be\n computed as well.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n Series or DataFrame\n\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n See Also\n --------\n core.window.Rolling.quantile: Rolling quantile.\n numpy.percentile: Numpy function to compute the percentile.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n ... columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n Name: 0.1, dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n\n Specifying `numeric_only=False` will also compute the quantile of\n datetime and timedelta data.\n\n >>> df = pd.DataFrame({'A': [1, 2],\n ... 'B': [pd.Timestamp('2010'),\n ... pd.Timestamp('2011')],\n ... 'C': [pd.Timedelta('1 days'),\n ... 
pd.Timedelta('2 days')]})\n >>> df.quantile(0.5, numeric_only=False)\n A 1.5\n B 2010-07-02 12:00:00\n C 1 days 12:00:00\n Name: 0.5, dtype: object\n \"\"\"\n validate_percentile(q)\n\n data = self._get_numeric_data() if numeric_only else self\n axis = self._get_axis_number(axis)\n is_transposed = axis == 1\n\n if is_transposed:\n data = data.T\n\n if len(data.columns) == 0:\n # GH#23925 _get_numeric_data may have dropped all columns\n cols = Index([], name=self.columns.name)\n if is_list_like(q):\n return self._constructor([], index=q, columns=cols)\n return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)\n\n result = data._mgr.quantile(\n qs=q, axis=1, interpolation=interpolation, transposed=is_transposed\n )\n\n if result.ndim == 2:\n result = self._constructor(result)\n else:\n result = self._constructor_sliced(result, name=q)\n\n if is_transposed:\n result = result.T\n\n return result\n\n @doc(NDFrame.asfreq, **_shared_doc_kwargs)\n def asfreq(\n self,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool = False,\n fill_value=None,\n ) -> DataFrame:\n return super().asfreq(\n freq=freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n @doc(NDFrame.resample, **_shared_doc_kwargs)\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: Optional[int] = None,\n on=None,\n level=None,\n origin: Union[str, \"TimestampConvertibleTypes\"] = \"start_day\",\n offset: Optional[\"TimedeltaConvertibleTypes\"] = None,\n ) -> Resampler:\n return super().resample(\n rule=rule,\n axis=axis,\n closed=closed,\n label=label,\n convention=convention,\n kind=kind,\n loffset=loffset,\n base=base,\n on=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def to_timestamp(\n self, freq=None, how: str = \"start\", axis: Axis = 0, copy: bool = True\n ) -> DataFrame:\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. 
end.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with DatetimeIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_timestamp(freq=freq, how=how)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex.\n\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed).\n\n Parameters\n ----------\n freq : str, default\n Frequency of the PeriodIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with PeriodIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_period(freq=freq)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def isin(self, values) -> DataFrame:\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dict, the keys must be the column names,\n which must match. If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n See Also\n --------\n DataFrame.eq: Equality test for DataFrame.\n Series.isin: Equivalent method on Series.\n Series.str.contains: Test if pattern or regex is contained within a\n string of a Series or Index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n\n When ``values`` is a Series or DataFrame the index and column must\n match. Note that 'falcon' does not match based on the number of legs\n in df2.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... 
index=['spider', 'falcon'])\n >>> df.isin(other)\n num_legs num_wings\n falcon True True\n dog False False\n \"\"\"\n if isinstance(values, dict):\n from pandas.core.reshape.concat import concat\n\n values = collections.defaultdict(list, values)\n return concat(\n (\n self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)\n ),\n axis=1,\n )\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis=\"index\")\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\n \"only list-like or dict-like objects are allowed \"\n \"to be passed to DataFrame.isin(), \"\n f\"you passed a '{type(values).__name__}'\"\n )\n return self._constructor(\n algorithms.isin(self.values.ravel(), values).reshape(self.shape),\n self.index,\n self.columns,\n )\n\n # ----------------------------------------------------------------------\n # Add index and columns\n _AXIS_ORDERS = [\"index\", \"columns\"]\n _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {\n **NDFrame._AXIS_TO_AXIS_NUMBER,\n 1: 1,\n \"columns\": 1,\n }\n _AXIS_REVERSED = True\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 1\n _info_axis_name = \"columns\"\n\n index: Index = properties.AxisProperty(\n axis=1, doc=\"The index (row labels) of the DataFrame.\"\n )\n columns: Index = properties.AxisProperty(\n axis=0, doc=\"The column labels of the DataFrame.\"\n )\n\n @property\n def _AXIS_NUMBERS(self) -> Dict[str, int]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NUMBERS\n return {\"index\": 0, \"columns\": 1}\n\n @property\n def _AXIS_NAMES(self) -> Dict[int, str]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NAMES\n return {0: \"index\", 1: \"columns\"}\n\n # ----------------------------------------------------------------------\n # Add plotting methods to DataFrame\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n hist = pandas.plotting.hist_frame\n boxplot = pandas.plotting.boxplot_frame\n sparse = CachedAccessor(\"sparse\", SparseFrameAccessor)\n\n\nDataFrame._add_numeric_operations()\n\nops.add_flex_arithmetic_methods(DataFrame)\n\n\ndef _from_nested_dict(data) -> collections.defaultdict:\n new_data: collections.defaultdict = collections.defaultdict(dict)\n for index, s in data.items():\n for col, v in s.items():\n new_data[col][index] = v\n return new_data\n\n\ndef _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike:\n # reindex if necessary\n\n if value.index.equals(index) or not len(index):\n return value._values.copy()\n\n # GH#4107\n try:\n reindexed_value = value.reindex(index)._values\n except ValueError as err:\n # raised in MultiIndex.from_tuples, see test_insert_error_msmgs\n if not value.index.is_unique:\n # duplicate axis\n raise err\n\n raise TypeError(\n \"incompatible index of inserted column with frame index\"\n ) from err\n return reindexed_value\n\n\ndef _maybe_atleast_2d(value):\n # TODO(EA2D): not needed with 2D EAs\n\n if is_extension_array_dtype(value):\n return value\n\n return np.atleast_2d(np.asarray(value))\n"
] |
[
[
"pandas.core.dtypes.common.is_list_like",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.Series",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.empty_like",
"numpy.asarray",
"pandas.core.construction.array",
"pandas.core.dtypes.common.is_sequence",
"pandas._config.option_context",
"pandas.core.aggregation.agg_list_like",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_dict_like",
"numpy.errstate",
"numpy.array",
"pandas.core.aggregation.agg_dict_like"
],
[
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.cast.maybe_box_datetimelike",
"pandas.core.aggregation.transform",
"pandas.core.dtypes.common.infer_dtype_from_object",
"pandas.core.aggregation.reconstruct_func",
"pandas.core.internals.construction.mgr_to_mgr",
"numpy.where",
"pandas._libs.algos.nancorr",
"pandas.core.common.standardize_mapping",
"pandas.core.dtypes.cast.maybe_convert_platform",
"numpy.full",
"pandas.core.internals.construction.init_dict",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.indexes.base.default_index",
"pandas.core.sorting.lexsort_indexer",
"pandas.core.dtypes.common.is_list_like",
"pandas.io.formats.format.DataFrameRenderer",
"pandas.core.dtypes.common.is_sequence",
"pandas._libs.hashtable.duplicated_int64",
"pandas.core.internals.construction.masked_rec_array_to_mgr",
"numpy.array",
"pandas.core.algorithms.take",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.reshape.reshape.stack",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.computation.eval.eval",
"pandas.core.ops.fill_binop",
"pandas._libs.algos.nancorr_spearman",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.groupby.generic.DataFrameGroupBy",
"pandas.core.internals.construction.arrays_to_mgr",
"numpy.shape",
"pandas.io.formats.style.Styler",
"pandas.core.dtypes.missing.isna",
"pandas.core.generic.NDFrame.__init__",
"numpy.asarray",
"pandas.compat.numpy.function.validate_round",
"pandas._config.get_option",
"pandas.io.gbq.to_gbq",
"pandas.core.internals.construction.sanitize_index",
"pandas.core.dtypes.cast.invalidate_string_dtypes",
"pandas.core.dtypes.common.is_dataclass",
"pandas.core.common.asarray_tuplesafe",
"pandas.compat._optional.import_optional_dependency",
"pandas.io.formats.console.in_interactive_session",
"pandas.core.nanops.nancorr",
"pandas.core.internals.construction.to_arrays",
"pandas.core.ops.frame_arith_method_with_reindex",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.compat.numpy.function.validate_transpose",
"pandas.core.indexes.api.Index",
"pandas.io.stata.StataWriterUTF8",
"numpy.errstate",
"pandas.core.dtypes.cast.maybe_infer_to_datetimelike",
"pandas.util._validators.validate_axis_style_args",
"numpy.rec.fromarrays",
"pandas.core.dtypes.common.is_integer",
"pandas.core.aggregation.relabel_result",
"pandas.core.nanops.get_corr_func",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.reshape.pivot.pivot_table",
"pandas.util._decorators.deprecate_kwarg",
"pandas.io.formats.console.get_console_size",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.reshape.pivot.pivot",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.missing.notna",
"pandas.core.construction.sanitize_masked_array",
"pandas.core.sorting.get_group_index",
"pandas.core.ops.should_reindex_frame_op",
"pandas.core.reshape.melt.melt",
"pandas.util._decorators.Substitution",
"pandas.core.series.Series",
"pandas.core.internals.construction.init_ndarray",
"pandas.core.ops.align_method_FRAME",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.cast.validate_numeric_casting",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_hashable",
"numpy.cov",
"numpy.transpose",
"numpy.iterable",
"pandas._libs.properties.AxisProperty",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.core.algorithms.SelectNFrame",
"pandas.core.sorting.nargsort",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.accessor.CachedAccessor",
"pandas.core.indexes.api.ensure_index",
"pandas.core.ops.get_array_op",
"pandas.util._validators.validate_percentile",
"numpy.dot",
"pandas.core.indexes.multi.maybe_droplevels",
"pandas.core.computation.expressions.where",
"pandas.io.formats.info.DataFrameInfo",
"pandas.core.indexing.check_bool_indexer",
"pandas.io.common.get_handle",
"pandas.io.parquet.to_parquet",
"pandas.core.reshape.reshape.stack_multiple",
"pandas.core.dtypes.common.is_float",
"pandas.core.apply.frame_apply",
"pandas.core.indexes.api.ensure_index_from_sequences",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexing.convert_to_index_sliceable",
"pandas.option_context",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.reshape.concat.concat",
"pandas.core.internals.construction.treat_as_nested",
"pandas.core.algorithms.take_2d_multi",
"pandas.core.reshape.reshape.unstack",
"pandas.core.internals.construction.dataclasses_to_dicts",
"numpy.isfinite",
"pandas.io.formats.console.in_ipython_frontend",
"numpy.compress",
"pandas.core.reshape.merge.merge",
"pandas.core.internals.construction.nested_data_to_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.common.apply_if_callable",
"pandas._libs.lib.maybe_convert_objects",
"pandas.io.feather_format.to_feather",
"pandas.core.internals.construction.reorder_arrays",
"pandas.core.construction.extract_array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.0",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
totucuong/vae-seq
|
[
"0a1bace02c6bac6ab991ab8203a203d3061615ec"
] |
[
"vaeseq/examples/text/dataset.py"
] |
[
"# Copyright 2018 Google, Inc.,\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dataset for iterating over text.\"\"\"\n\nimport collections\nimport numpy as np\nimport tensorflow as tf\n\n\ndef _split_string(string):\n \"\"\"Splits a byte string into an array of character bytes.\"\"\"\n text = tf.compat.as_text(string)\n ret = np.empty(len(text), dtype=np.object)\n for i, char in enumerate(text):\n ret[i] = tf.compat.as_bytes(char)\n return ret\n\n\ndef vocabulary(filename, max_size=None, num_oov_buckets=1):\n \"\"\"Builds vocabulary and ID lookup tables from the given file.\"\"\"\n\n def _unique_chars(filename):\n \"\"\"Returns the used alphabet as an array of strings.\"\"\"\n counts = collections.Counter()\n with tf.gfile.Open(filename) as file_:\n for line in file_:\n counts.update(_split_string(line))\n alphabet = [k for (k, _) in counts.most_common(max_size)]\n alphabet.sort()\n return np.asarray(alphabet, dtype=np.object)\n\n chars, = tf.py_func(_unique_chars, [filename], [tf.string])\n char_to_id = tf.contrib.lookup.index_table_from_tensor(\n chars, num_oov_buckets=num_oov_buckets)\n id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, \" \")\n return char_to_id, id_to_char\n\n\ndef characters(filename, batch_size, sequence_size):\n \"\"\"Returns a dataset of characters from the given file.\"\"\"\n\n def _to_chars(line):\n \"\"\"string scalar -> Dataset of characters (string scalars).\"\"\"\n chars, = tf.py_func(_split_string, [line + \"\\n\"], [tf.string])\n chars.set_shape([None])\n return tf.data.Dataset.from_tensor_slices(chars)\n\n return (tf.data.TextLineDataset([filename])\n .flat_map(_to_chars)\n .repeat()\n .batch(tf.to_int64(sequence_size))\n .shuffle(1000)\n .batch(tf.to_int64(batch_size)))\n"
] |
[
[
"tensorflow.contrib.lookup.index_to_string_table_from_tensor",
"tensorflow.to_int64",
"tensorflow.contrib.lookup.index_table_from_tensor",
"tensorflow.gfile.Open",
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.compat.as_bytes",
"tensorflow.data.TextLineDataset",
"tensorflow.compat.as_text",
"tensorflow.py_func"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
siddharthab/tensorflow
|
[
"fbeca0b40aaec37c1ff7fbc3cf84215755faac51"
] |
[
"tensorflow/python/keras/engine/data_adapter_test.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"DataAdapter tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import data_adapter\nfrom tensorflow.python.keras.utils import data_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DataAdapterTestBase(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(DataAdapterTestBase, self).setUp()\n self.batch_size = 5\n self.numpy_input = np.zeros((50, 10))\n self.numpy_target = np.ones(50)\n self.tensor_input = constant_op.constant(2.0, shape=(50, 10))\n self.tensor_target = array_ops.ones((50,))\n self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(\n (self.numpy_input, self.numpy_target)).shuffle(50).batch(\n self.batch_size)\n\n def generator():\n while True:\n yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))\n self.generator_input = generator()\n self.sequence_input = TestSequence(batch_size=self.batch_size,\n feature_shape=10)\n self.model = keras.models.Sequential(\n [keras.layers.Dense(8, input_shape=(10,), activation='softmax')])\n\n\nclass TestSequence(data_utils.Sequence):\n\n def __init__(self, batch_size, feature_shape):\n self.batch_size = batch_size\n self.feature_shape = feature_shape\n\n def __getitem__(self, item):\n return (np.zeros((self.batch_size, self.feature_shape)),\n np.ones((self.batch_size,)))\n\n def __len__(self):\n return 10\n\n\nclass TensorLikeDataAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(TensorLikeDataAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.TensorLikeDataAdapter\n\n def test_can_handle_numpy(self):\n self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))\n self.assertTrue(\n self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))\n\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_iterator_expect_batch_size_numpy(self):\n with self.assertRaisesRegexp(\n ValueError, r'`batch_size` or `steps` is required'):\n self.adapter_cls(self.numpy_input, self.numpy_target)\n\n def test_size_numpy(self):\n adapter = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=5)\n self.assertEqual(adapter.get_size(), 10)\n self.assertFalse(adapter.has_partial_batch())\n\n def test_batch_size_numpy(self):\n adapter = self.adapter_cls(\n 
self.numpy_input, self.numpy_target, batch_size=5)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch_numpy(self):\n adapter = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=4)\n self.assertEqual(adapter.get_size(), 13) # 50/4\n self.assertTrue(adapter.has_partial_batch())\n self.assertEqual(adapter.partial_batch_size(), 2)\n\n def test_training_numpy(self):\n dataset = self.adapter_cls(\n self.numpy_input, self.numpy_target, batch_size=5).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_can_handle(self):\n self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))\n self.assertTrue(\n self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))\n\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n dataset = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=5).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_size(self):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=5)\n self.assertEqual(adapter.get_size(), 10)\n self.assertFalse(adapter.has_partial_batch())\n\n @parameterized.named_parameters(\n ('batch_size_5', 5, None, 5),\n ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence\n ('steps_1', None, 1, 50),\n ('steps_4', None, 4, 13),\n )\n def test_batch_size(self, batch_size_in, steps, batch_size_out):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=batch_size_in,\n steps=steps)\n self.assertEqual(adapter.batch_size(), batch_size_out)\n\n @parameterized.named_parameters(\n ('batch_size_5', 5, None, 10, 0),\n ('batch_size_4', 4, None, 13, 2),\n ('steps_1', None, 1, 1, 0),\n ('steps_5', None, 5, 5, 0),\n ('steps_4', None, 4, 4, 11),\n )\n def test_partial_batch(\n self, batch_size_in, steps, size, partial_batch_size):\n adapter = self.adapter_cls(\n self.tensor_input, self.tensor_target, batch_size=batch_size_in,\n steps=steps)\n self.assertEqual(adapter.get_size(), size) # 50/steps\n self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))\n self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)\n\n\nclass DatasetAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(DatasetAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.DatasetAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n dataset = self.adapter_cls(self.dataset_input).get_dataset()\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(dataset)\n\n def test_size(self):\n adapter = self.adapter_cls(self.dataset_input)\n self.assertIsNone(adapter.get_size())\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.dataset_input)\n self.assertIsNone(adapter.batch_size())\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.dataset_input)\n 
self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nclass GeneratorDataAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(GeneratorDataAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.GeneratorDataAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertTrue(self.adapter_cls.can_handle(self.generator_input))\n self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.generator_input, steps_per_epoch=10)\n\n @test_util.run_v2_only\n def test_with_multiprocessing_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.generator_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n # Fit twice to ensure there isn't any duplication that prevent the worker\n # from starting.\n self.model.fit(self.generator_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n\n def test_size(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertIsNone(adapter.get_size())\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.generator_input)\n self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nclass KerasSequenceAdapterTest(DataAdapterTestBase):\n\n def setUp(self):\n super(KerasSequenceAdapterTest, self).setUp()\n self.adapter_cls = data_adapter.KerasSequenceAdapter\n\n def test_can_handle(self):\n self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))\n self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))\n self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))\n self.assertFalse(self.adapter_cls.can_handle(self.generator_input))\n self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))\n\n def test_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.sequence_input)\n\n @test_util.run_v2_only\n def test_with_multiprocessing_training(self):\n self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')\n self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n # Fit twice to ensure there isn't any duplication that prevent the worker\n # from starting.\n self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,\n max_queue_size=10, steps_per_epoch=10)\n\n def test_size(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertEqual(adapter.get_size(), 10)\n\n def test_batch_size(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertEqual(adapter.batch_size(), 5)\n\n def test_partial_batch(self):\n adapter = self.adapter_cls(self.sequence_input)\n self.assertFalse(adapter.has_partial_batch())\n self.assertIsNone(adapter.partial_batch_size())\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices",
"tensorflow.python.keras.layers.Dense",
"numpy.ones",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"numpy.zeros",
"tensorflow.python.framework.constant_op.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
sivasanarul/amfe_topopt
|
[
"ba7fa1ce756e7ea6e4fd7b2bdb609b83bbfac472"
] |
[
"examples/nonlinear_beam_hyperreduction.py"
] |
[
"# Beam example\n\n# Distributed under BSD-3-Clause License. See LICENSE-File for more information\n#\n\"\"\"\nExample showing a cantilever beam which is loaded on the tip with a force\nshowing nonlinear displacements.\n\nThe beam is reduced with ECSW and NSKTS\n\"\"\"\n\nimport os\nimport time\nimport numpy as np\nfrom h5py import File\n\nfrom amfe.ui import *\nfrom amfe.io import amfe_dir\nfrom amfe.io.mesh import AmfeMeshObjMeshReader\nfrom amfe.io.postprocessing import *\nfrom amfe.material import KirchhoffMaterial\nfrom amfe.solver import *\nfrom amfe.mor import *\nfrom amfe.mor.hyper_red import *\nfrom amfe.structural_dynamics import vibration_modes\n\nstudies = []\nstudies.append('full_ti')\n#studies.append('create_basis_1')\n#studies.append('red_ti')\n#studies.append('ecsw')\n#studies.append('poly3')\n\nOmega = 31.0\n\ntimes = dict([])\ninput_file = amfe_dir('meshes/gmsh/bar.msh')\noutput_file = amfe_dir('results/beam_nonlinear_refactoring/beam_ecsw')\n\n# Define material\nmaterial = KirchhoffMaterial(E=210E9, nu=0.3, rho=1E4, plane_stress=True)\n# Load Mesh\nmesh = import_mesh_from_file(input_file)\n# Create Component\ncomponent = create_structural_component(mesh)\n# Assign material\ncomponent.assign_material(material, [7], 'S')\n# Assign Dirichlet Boundaries\nset_dirichlet_by_group(component, 8, ('ux', 'uy'))\n# Assign Neumann Boundaries\nforce = component.neumann.create_fixed_direction_neumann(np.array([0, -1], dtype=float),\n lambda t: 1E8*np.sin(Omega*t))\ncomponent.assign_neumann('Force', force, [9])\n\n#\nsystem, formulation = create_constrained_mechanical_system_from_component(component, constant_mass=True,\n constant_damping=True,\n constraint_formulation='boolean')\n\n# Solver Factory:\nsolfac = SolverFactory()\nsolfac.set_system(system)\nsolfac.set_dt_initial(0.001)\nsolfac.set_newton_maxiter(30)\nsolfac.set_newton_atol(1e-6)\nsolfac.set_newton_rtol(1e-8)\nsolfac.set_linear_solver('scipy-sparse')\nsolfac.set_nonlinear_solver('newton')\nsolfac.set_analysis_type('transient')\nsolfac.set_integrator('genalpha')\n\nsolver = solfac.create_solver()\n\nsol_dyn_nl_full = AmfeSolution()\n\n\ndef write_callback(t, x, dx, ddx):\n u, du, ddu = formulation.recover(x, dx, ddx, t)\n sol_dyn_nl_full.write_timestep(t, u, du, ddu)\n\n\nno_of_dofs = system.dimension\nx0 = np.zeros(no_of_dofs)\ndx0 = x0.copy()\nt_start = 0.0\nt_end = 1.0\n\nif 'full_ti' in studies:\n t0 = time.time()\n solver.solve(write_callback, t_start, x0, dx0, t_end)\n t1 = time.time()\n times.update({'Nonlinear full solution:': t1-t0})\n print('Full dynamic solution took {} seconds'.format(t1-t0))\n\n write_results_to_paraview(sol_dyn_nl_full, component, output_file + '_dyn_nl_full')\n\n# ------------------- SOLVE LINEAR DYNAMICS ------------------------------\n\nif 'create_basis_1' in studies:\n t0 = time.time()\n K0 = system.K(x0, dx0, 0.0)\n M0 = system.M(x0, dx0, 0.0)\n omega, V = vibration_modes(K0, M0, 6, mass_orth=True)\n\n def sdK(x):\n return system.K(x, x0, 0.0)\n\n Theta = static_derivatives(V, sdK, M0)\n V_extended = augment_with_derivatives(V, Theta)\n t1 = time.time()\n times.update({'nonlinear basis generation:': t1-t0})\n print('nonlinear basis generation took {} seconds'.format(t1-t0))\n\n sol_basis_1 = AmfeSolution()\n for i in np.arange(V_extended.shape[1]):\n u = formulation.u(V_extended[:, i], 0.0)\n sol_basis_1.write_timestep(i, u)\n\n write_results_to_paraview(sol_basis_1, component, output_file + '_reduction_basis_1')\n\n sol_basis_sd = AmfeSolution()\n counter = 0\n for i in 
np.arange(Theta.shape[1]):\n for j in np.arange(Theta.shape[1]):\n if i > j:\n Theta_u = formulation.u(Theta[:, i, j], 0.0)\n sol_basis_sd.write_timestep(counter, Theta_u)\n counter = counter + 1\n\n write_results_to_paraview(sol_basis_sd, component, output_file + '_static_derivatives')\n\nif 'nskts' in studies:\n # Training Set Generation\n t0 = time.time()\n K0 = system.K(x0, dx0, 0.0)\n M0 = system.M(x0, dx0, 0.0)\n t_max = np.pi/2/Omega\n F_ext_max = system.f_ext(x0, dx0, t_max)\n\n def fint_func(x):\n return system.f_int(x, dx0, 0.0)\n\n def K_func(x):\n return system.K(x, dx0, 0.0)\n\n nskts = compute_nskts(K0, M0, F_ext_max, fint_func, K_func)\n t1 = time.time()\n times.update({'Training-Set Generation (NSKTS):': t1-t0})\n print('Training-Set Generation (NSKTS) took {} seconds'.format(t1 - t0))\n\n sol_nskts = AmfeSolution()\n\n for i in range(nskts.shape[1]):\n # Recover unconstrained u\n u = formulation.u(nskts[:, i], 0.0)\n sol_nskts.write_timestep(i, u)\n\n write_results_to_paraview(sol_nskts, component, output_file + '_nskts')\nelse:\n nskts = np.load(output_file + '_nskts.npy')\n\n\nif 'red_ti' in studies:\n # Reduce system\n t0 = time.time()\n red_system = reduce_mechanical_system(system, V_extended, constant_mass=True, constant_damping=True)\n t1 = time.time()\n times.update({'Reduction step:': t1-t0})\n print('Reduction step took {} seconds'.format(t1 - t0))\n solfac.set_system(red_system)\n\n red_solver = solfac.create_solver()\n\n sol_dyn_nl_red = AmfeSolution()\n\n\n def write_callback(t, x, dx, ddx):\n u, du, ddu = formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n sol_dyn_nl_red.write_timestep(t, u, du, ddu)\n\n\n no_of_red_dofs = red_system.dimension\n x0 = np.zeros(no_of_red_dofs)\n dx0 = x0.copy()\n t_start = 0.0\n t_end = 1.0\n\n t0 = time.time()\n red_solver.solve(write_callback, t_start, x0, dx0, t_end)\n t1 = time.time()\n times.update({'Nonlinear full solution:': t1 - t0})\n print('Full dynamic solution took {} seconds'.format(t1 - t0))\n\n write_results_to_paraview(sol_dyn_nl_red, component, output_file + '_dyn_nl_red')\n\n\nif 'ecsw_weight_generation' in studies:\n # Hyperreduction ECSW\n t0 = time.time()\n q_training = np.linalg.solve((V_extended.T @ V_extended), V_extended.T @ nskts)\n\n x_training = V_extended @ q_training\n weights, indices, stats = ecsw_get_weights_from_constrained_training(x_training, component, formulation, V_extended)\n\n np.save(output_file + '_ecsw_weights.npy', weights)\n np.save(output_file + '_ecsw_indices.npy', indices)\n t1 = time.time()\n times.update({'Hyperreduction step:': t1-t0})\n print('Hyperreduction step:'.format(t1 - t0))\n\nif 'ecsw' in studies:\n weights = np.load(output_file + '_ecsw_weights.npy')\n indices = np.load(output_file + '_ecsw_indices.npy')\n\n # Create reduced system\n tagname = 'ecsw_weights'\n ecsw_system, ecsw_formulation, ecsw_component = create_ecsw_hyperreduced_mechanical_system_from_weights(component, V_extended, weights, indices, 'boolean',\n constant_mass=True, constant_damping=True,\n tagname=tagname)\n\n # Solve system\n solfac.set_system(ecsw_system)\n ecsw_solver = solfac.create_solver()\n\n sol_dyn_nl_ecsw = AmfeSolution()\n\n def write_callback(t, x, dx, ddx):\n u, du, ddu = ecsw_formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n sol_dyn_nl_ecsw.write_timestep(t, u, du, ddu)\n\n\n # Set initial conditions\n no_of_dofs = ecsw_system.dimension\n x0 = np.zeros(no_of_dofs)\n dx0 = x0.copy()\n # Set start end endtime for time 
integration\n t_start = 0.0\n t_end = 1.0\n\n # Solve hyperreduced system\n t0 = time.time()\n ecsw_solver.solve(write_callback, t_start, x0, dx0, t_end)\n t1 = time.time()\n times.update({'ECSW solution:': t1 - t0})\n print('ECSW solution took {} seconds'.format(t1 - t0))\n\n # -- POSTPROCESSING --\n # Instantiate Hdf5PostProcessorWriter\n mreader = AmfeMeshObjMeshReader(component.mesh)\n ecsw_output = output_file + '_ecsw_weights.hdf5'\n if os.path.isfile(ecsw_output):\n os.remove(ecsw_output)\n hwriter = Hdf5PostProcessorWriter(mreader, ecsw_output)\n\n # Write Solution\n preader = AmfeSolutionReader(sol_dyn_nl_ecsw, component)\n preader.parse(hwriter)\n\n # Write ECSW weights\n data = ecsw_component.mesh.el_df[tagname].values\n indices = ecsw_component.mesh.el_df.index.values\n hwriter.write_field(tagname, PostProcessDataType.SCALAR, sol_dyn_nl_ecsw.t,\n data, indices, MeshEntityType.ELEMENT)\n\n # Finish writing -> Call return result\n hwriter.return_result()\n\n # Write xdmf file from hdf5 for viewing in paraview\n paraviewfilename = output_file + '_ecsw_weights'\n hdf5resultsfilename = paraviewfilename + '.hdf5'\n xdmfresultsfilename = paraviewfilename + '.xdmf'\n\n fielddict = {'weights': {'mesh_entity_type': MeshEntityType.ELEMENT,\n 'data_type': PostProcessDataType.SCALAR,\n 'hdf5path': '/results/ecsw_weights'\n },\n 'displacement': {'mesh_entity_type': MeshEntityType.NODE,\n 'data_type': PostProcessDataType.VECTOR,\n 'hdf5path': '/results/displacement'\n }\n }\n\n with open(xdmfresultsfilename, 'wb') as xdmffp:\n with File(hdf5resultsfilename, mode='r') as hdf5fp:\n write_xdmf_from_hdf5(xdmffp, hdf5fp, '/mesh/nodes', '/mesh/topology', sol_dyn_nl_ecsw.t, fielddict)\n\n\nif 'poly3' in studies:\n K1, K2, K3 = poly3_get_tensors(system, V_extended)\n poly3_system = create_poly3_hyperreduced_system(system, V_extended, K1, K2, K3)\n\n solfac.set_system(poly3_system)\n poly3_solver = solfac.create_solver()\n\n sol_dyn_nl_poly3 = AmfeSolution()\n\n\n def write_callback(t, x, dx, ddx):\n u, du, ddu = formulation.recover(V_extended.dot(x), V_extended.dot(dx), V_extended.dot(ddx), t)\n sol_dyn_nl_poly3.write_timestep(t, u, du, ddu)\n\n\n no_of_red_dofs = poly3_system.dimension\n x0 = np.zeros(no_of_red_dofs)\n dx0 = x0.copy()\n t_start = 0.0\n t_end = 1.0\n\n t0 = time.time()\n poly3_solver.solve(write_callback, t_start, x0, dx0, t_end)\n t1 = time.time()\n times.update({'Nonlinear full solution:': t1 - t0})\n print('Full dynamic solution took {} seconds'.format(t1 - t0))\n\n write_results_to_paraview(sol_dyn_nl_poly3, component, output_file + '_dyn_nl_poly3')\n"
] |
[
[
"numpy.linalg.solve",
"numpy.arange",
"numpy.save",
"numpy.sin",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
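
As a hedged aside on the `apis` entries above: the script in this record builds its hyperreduction training data by projecting full-order snapshots onto a reduced basis with `numpy.linalg.solve`. The sketch below reproduces that projection pattern in isolation; all dimensions and variable names are illustrative stand-ins, not taken from the repository.

```python
# Minimal sketch (assumed shapes): least-squares projection of training snapshots
# onto a reduced basis V, mirroring the np.linalg.solve call in the ECSW study above.
import numpy as np

n_full, n_red, n_snapshots = 1000, 20, 15            # assumed problem sizes
V = np.random.rand(n_full, n_red)                    # stand-in for the extended basis V_extended
snapshots = np.random.rand(n_full, n_snapshots)      # stand-in for the NSKTS training set

# Reduced coordinates from the normal equations: (V^T V) q = V^T x
q_training = np.linalg.solve(V.T @ V, V.T @ snapshots)

# Lift back to full dimension to obtain the constrained training displacements
x_training = V @ q_training
print(x_training.shape)                              # (n_full, n_snapshots)
```
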
StephAO/gym-minigrid
|
[
"6ab2914c2731a68e41e5b4c97a6877b19d4964b5"
] |
[
"gym_minigrid/minigrid.py"
] |
[
"import math\nimport hashlib\nimport gym\nfrom enum import IntEnum\nimport numpy as np\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nfrom .rendering import *\n\n# Size in pixels of a tile in the full-scale human view\nTILE_PIXELS = 32\n\n# Map of color names to RGB values\nCOLORS = {\n 'red' : np.array([255, 0, 0]),\n 'green' : np.array([0, 255, 0]),\n 'blue' : np.array([0, 0, 255]),\n 'purple': np.array([112, 39, 195]),\n 'yellow': np.array([255, 255, 0]),\n 'grey' : np.array([100, 100, 100]),\n 'white' : np.array([255, 255, 255]),\n 'cyan' : np.array([0, 255, 255]),\n 'brown' : np.array([139, 69, 19]),\n 'orange' : np.array([255, 99, 71])\n}\n\nCOLOR_NAMES = sorted(list(COLORS.keys()))\n\n# Used to map colors to integers\nCOLOR_TO_IDX = {\n 'red' : 0,\n 'green' : 1,\n 'blue' : 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey' : 5,\n 'white' : 6,\n 'cyan' : 7,\n 'brown' : 8,\n 'orange' : 9 \n}\n\nIDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))\n\n# Map of object type to integers\nOBJECT_TO_IDX = {\n 'unseen' : 0,\n 'empty' : 1,\n 'wall' : 2,\n 'floor' : 3,\n 'door' : 4,\n 'key' : 5,\n 'ball' : 6,\n 'box' : 7,\n 'goal' : 8,\n 'lava' : 9,\n 'agent' : 10,\n 'square' : 11,\n 'crate' : 12,\n 'circle' : 13\n}\n\nIDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))\n\n# Map of state names to integers\nSTATE_TO_IDX = {\n 'open' : 0,\n 'closed': 1,\n 'locked': 2,\n}\n\n# Map of agent direction indices to vectors\nDIR_TO_VEC = [\n # Pointing right (positive X)\n np.array((1, 0)),\n # Down (positive Y)\n np.array((0, 1)),\n # Pointing left (negative X)\n np.array((-1, 0)),\n # Up (negative Y)\n np.array((0, -1)),\n]\n\nclass WorldObj:\n \"\"\"\n Base class for grid world objects\n \"\"\"\n\n def __init__(self, type, color):\n assert type in OBJECT_TO_IDX, type\n assert color in COLOR_TO_IDX, color\n self.type = type\n self.color = color\n self.contains = None\n\n # Initial position of the object\n self.init_pos = None\n\n # Current position of the object\n self.cur_pos = None\n\n def can_overlap(self):\n \"\"\"Can the agent overlap with this?\"\"\"\n return False\n\n def can_pickup(self):\n \"\"\"Can the agent pick this up?\"\"\"\n return False\n\n def can_contain(self):\n \"\"\"Can this contain another object?\"\"\"\n return False\n\n def see_behind(self):\n \"\"\"Can the agent see behind this object?\"\"\"\n return True\n\n def toggle(self, env, pos):\n \"\"\"Method to trigger/toggle an action this object performs\"\"\"\n return False\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], 0)\n\n @staticmethod\n def decode(type_idx, color_idx, state):\n \"\"\"Create an object from a 3-tuple state description\"\"\"\n\n obj_type = IDX_TO_OBJECT[type_idx]\n color = IDX_TO_COLOR[color_idx]\n\n if obj_type == 'empty' or obj_type == 'unseen':\n return None\n\n # State, 0: open, 1: closed, 2: locked\n is_open = state == 0\n is_locked = state == 2\n\n if obj_type == 'wall':\n v = Wall(color)\n elif obj_type == 'floor':\n v = Floor(color)\n elif obj_type == 'ball':\n v = Ball(color)\n elif obj_type == 'key':\n v = Key(color)\n elif obj_type == 'box':\n v = Box(color)\n elif obj_type == 'door':\n v = Door(color, is_open, is_locked)\n elif obj_type == 'goal':\n v = Goal()\n elif obj_type == 'lava':\n v = Lava()\n elif obj_type == 'circle':\n v = Circle()\n elif obj_type == 'square':\n v = Square()\n elif obj_type == 'crate':\n v = Crate()\n else:\n 
assert False, \"unknown object type in decode '%s'\" % obj_type\n\n return v\n\n def render(self, r):\n \"\"\"Draw this object with the given renderer\"\"\"\n raise NotImplementedError\n\nclass Goal(WorldObj):\n def __init__(self):\n super().__init__('goal', 'green')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])\n\nclass Floor(WorldObj):\n \"\"\"\n Colored floor tile the agent can walk over\n \"\"\"\n\n def __init__(self, color='blue'):\n super().__init__('floor', color)\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n # Give the floor a pale color\n color = COLORS[self.color] / 2\n fill_coords(img, point_in_rect(0.031, 1, 0.031, 1), color)\n\n\nclass Lava(WorldObj):\n def __init__(self):\n super().__init__('lava', 'red')\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n c = (255, 128, 0)\n\n # Background color\n fill_coords(img, point_in_rect(0, 1, 0, 1), c)\n\n # Little waves\n for i in range(3):\n ylo = 0.3 + 0.2 * i\n yhi = 0.4 + 0.2 * i\n fill_coords(img, point_in_line(0.1, ylo, 0.3, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.3, yhi, 0.5, ylo, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.5, ylo, 0.7, yhi, r=0.03), (0,0,0))\n fill_coords(img, point_in_line(0.7, yhi, 0.9, ylo, r=0.03), (0,0,0))\n\nclass Wall(WorldObj):\n def __init__(self, color='grey'):\n super().__init__('wall', color)\n\n def see_behind(self):\n return False\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])\n\nclass Door(WorldObj):\n def __init__(self, color, is_open=False, is_locked=False):\n super().__init__('door', color)\n self.is_open = is_open\n self.is_locked = is_locked\n\n def can_overlap(self):\n \"\"\"The agent can only walk over this cell when the door is open\"\"\"\n return self.is_open\n\n def see_behind(self):\n return self.is_open\n\n def toggle(self, env, pos):\n # If the player has the right key to open the door\n if self.is_locked:\n if isinstance(env.carrying, Key) and env.carrying.color == self.color:\n self.is_locked = False\n self.is_open = True\n return True\n return False\n\n self.is_open = not self.is_open\n return True\n\n def encode(self):\n \"\"\"Encode the a description of this object as a 3-tuple of integers\"\"\"\n\n # State, 0: open, 1: closed, 2: locked\n if self.is_open:\n state = 0\n elif self.is_locked:\n state = 2\n elif not self.is_open:\n state = 1\n\n return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], state)\n\n def render(self, img):\n c = COLORS[self.color]\n\n if self.is_open:\n fill_coords(img, point_in_rect(0.88, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.92, 0.96, 0.04, 0.96), (0,0,0))\n return\n\n # Door frame and door\n if self.is_locked:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.06, 0.94, 0.06, 0.94), 0.45 * np.array(c))\n\n # Draw key slot\n fill_coords(img, point_in_rect(0.52, 0.75, 0.50, 0.56), c)\n else:\n fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)\n fill_coords(img, point_in_rect(0.04, 0.96, 0.04, 0.96), (0,0,0))\n fill_coords(img, point_in_rect(0.08, 0.92, 0.08, 0.92), c)\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), (0,0,0))\n\n # Draw door handle\n fill_coords(img, point_in_circle(cx=0.75, cy=0.50, r=0.08), c)\n\nclass Key(WorldObj):\n def __init__(self, color='blue'):\n super(Key, self).__init__('key', color)\n\n def can_pickup(self):\n return True\n\n def 
render(self, img):\n c = COLORS[self.color]\n\n # Vertical quad\n fill_coords(img, point_in_rect(0.50, 0.63, 0.31, 0.88), c)\n\n # Teeth\n fill_coords(img, point_in_rect(0.38, 0.50, 0.59, 0.66), c)\n fill_coords(img, point_in_rect(0.38, 0.50, 0.81, 0.88), c)\n\n # Ring\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.190), c)\n fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.064), (0,0,0))\n\nclass Ball(WorldObj):\n def __init__(self, color='blue'):\n super(Ball, self).__init__('ball', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])\n\nclass Circle(WorldObj):\n def __init__(self, color='blue'):\n super(Circle, self).__init__('circle', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])\n\nclass Box(WorldObj):\n def __init__(self, color, contains=None):\n super(Box, self).__init__('box', color)\n self.contains = contains\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n\n # Outline\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)\n fill_coords(img, point_in_rect(0.18, 0.82, 0.18, 0.82), (0,0,0))\n\n # Horizontal slit\n fill_coords(img, point_in_rect(0.16, 0.84, 0.47, 0.53), c)\n\n def toggle(self, env, pos):\n # Replace the box by its contents\n env.grid.set(*pos, self.contains)\n return True\n\nclass Square(WorldObj):\n def __init__(self, color):\n super(Square, self).__init__('square', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n # Outline\n fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)\n\n\nclass Crate(WorldObj):\n def __init__(self, color):\n super(Crate, self).__init__('crate', color)\n\n def can_pickup(self):\n return True\n\n def render(self, img):\n c = COLORS[self.color]\n # Outline\n fill_coords(img, point_in_rect(0.1, 0.9, 0.3, 0.7), c)\n\n\nclass Grid:\n \"\"\"\n Represent a grid and operations on it\n \"\"\"\n\n # Static cache of pre-renderer tiles\n tile_cache = {}\n\n def __init__(self, width, height):\n assert width >= 3\n assert height >= 3\n\n self.width = width\n self.height = height\n\n self.grid = [None] * width * height\n\n def __contains__(self, key):\n if isinstance(key, WorldObj):\n for e in self.grid:\n if e is key:\n return True\n elif isinstance(key, tuple):\n for e in self.grid:\n if e is None:\n continue\n if (e.color, e.type) == key:\n return True\n if key[0] is None and key[1] == e.type:\n return True\n return False\n\n def __eq__(self, other):\n grid1 = self.encode()\n grid2 = other.encode()\n return np.array_equal(grid2, grid1)\n\n def __ne__(self, other):\n return not self == other\n\n def copy(self):\n from copy import deepcopy\n return deepcopy(self)\n\n def set(self, i, j, v):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[j * self.width + i] = v\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n return self.grid[j * self.width + i]\n\n def horz_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.width - x\n for i in range(0, length):\n self.set(x + i, y, obj_type())\n\n def vert_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.height - y\n for j in range(0, length):\n self.set(x, y + j, obj_type())\n\n def wall_rect(self, x, y, w, h):\n self.horz_wall(x, y, w)\n self.horz_wall(x, 
y+h-1, w)\n self.vert_wall(x, y, h)\n self.vert_wall(x+w-1, y, h)\n\n def rotate_left(self):\n \"\"\"\n Rotate the grid to the left (counter-clockwise)\n \"\"\"\n\n grid = Grid(self.height, self.width)\n\n for i in range(self.width):\n for j in range(self.height):\n v = self.get(i, j)\n grid.set(j, grid.height - 1 - i, v)\n\n return grid\n\n def slice(self, topX, topY, width, height):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n\n grid = Grid(width, height)\n\n for j in range(0, height):\n for i in range(0, width):\n x = topX + i\n y = topY + j\n\n if x >= 0 and x < self.width and \\\n y >= 0 and y < self.height:\n v = self.get(x, y)\n else:\n v = Wall()\n\n grid.set(i, j, v)\n\n return grid\n\n @classmethod\n def render_tile(\n cls,\n obj,\n agent_dir=None,\n highlight=False,\n tile_size=TILE_PIXELS,\n subdivs=3\n ):\n \"\"\"\n Render a tile and cache the result\n \"\"\"\n\n # Hash map lookup key for the cache\n key = (agent_dir, highlight, tile_size)\n key = obj.encode() + key if obj else key\n\n if key in cls.tile_cache:\n return cls.tile_cache[key]\n\n img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)\n\n # Draw the grid lines (top and left edges)\n fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))\n fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))\n\n if obj != None:\n obj.render(img)\n\n # Overlay the agent on top\n if agent_dir is not None:\n tri_fn = point_in_triangle(\n (0.12, 0.19),\n (0.87, 0.50),\n (0.12, 0.81),\n )\n\n # Rotate the agent based on its direction\n tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5*math.pi*agent_dir)\n fill_coords(img, tri_fn, (255, 0, 0))\n\n # Highlight the cell if needed\n if highlight:\n highlight_img(img)\n\n # Downsample the image to perform supersampling/anti-aliasing\n img = downsample(img, subdivs)\n\n # Cache the rendered tile\n cls.tile_cache[key] = img\n\n return img\n\n def render(\n self,\n tile_size,\n agent_pos=None,\n agent_dir=None,\n highlight_mask=None\n ):\n \"\"\"\n Render this grid at a given scale\n :param r: target renderer object\n :param tile_size: tile size in pixels\n \"\"\"\n\n if highlight_mask is None:\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # Compute the total grid size\n width_px = self.width * tile_size\n height_px = self.height * tile_size\n\n img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)\n\n # Render the grid\n for j in range(0, self.height):\n for i in range(0, self.width):\n cell = self.get(i, j)\n\n agent_here = np.array_equal(agent_pos, (i, j))\n tile_img = Grid.render_tile(\n cell,\n agent_dir=agent_dir if agent_here else None,\n highlight=highlight_mask[i, j],\n tile_size=tile_size\n )\n\n ymin = j * tile_size\n ymax = (j+1) * tile_size\n xmin = i * tile_size\n xmax = (i+1) * tile_size\n img[ymin:ymax, xmin:xmax, :] = tile_img\n\n return img\n\n def encode(self, vis_mask=None):\n \"\"\"\n Produce a compact numpy encoding of the grid\n \"\"\"\n\n if vis_mask is None:\n vis_mask = np.ones((self.width, self.height), dtype=bool)\n\n array = np.zeros((self.width, self.height, 3), dtype='uint8')\n\n for i in range(self.width):\n for j in range(self.height):\n if vis_mask[i, j]:\n v = self.get(i, j)\n\n if v is None:\n array[i, j, 0] = OBJECT_TO_IDX['empty']\n array[i, j, 1] = 0\n array[i, j, 2] = 0\n\n else:\n array[i, j, :] = v.encode()\n\n return array\n\n @staticmethod\n def decode(array):\n \"\"\"\n Decode an array grid encoding back into a grid\n \"\"\"\n\n width, height, channels 
= array.shape\n assert channels == 3\n\n vis_mask = np.ones(shape=(width, height), dtype=np.bool)\n\n grid = Grid(width, height)\n for i in range(width):\n for j in range(height):\n type_idx, color_idx, state = array[i, j]\n v = WorldObj.decode(type_idx, color_idx, state)\n grid.set(i, j, v)\n vis_mask[i, j] = (type_idx != OBJECT_TO_IDX['unseen'])\n\n return grid, vis_mask\n\n def process_vis(grid, agent_pos):\n mask = np.zeros(shape=(grid.width, grid.height), dtype=np.bool)\n\n mask[agent_pos[0], agent_pos[1]] = True\n\n for j in reversed(range(0, grid.height)):\n for i in range(0, grid.width-1):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i+1, j] = True\n if j > 0:\n mask[i+1, j-1] = True\n mask[i, j-1] = True\n\n for i in reversed(range(1, grid.width)):\n if not mask[i, j]:\n continue\n\n cell = grid.get(i, j)\n if cell and not cell.see_behind():\n continue\n\n mask[i-1, j] = True\n if j > 0:\n mask[i-1, j-1] = True\n mask[i, j-1] = True\n\n for j in range(0, grid.height):\n for i in range(0, grid.width):\n if not mask[i, j]:\n grid.set(i, j, None)\n\n return mask\n\nclass MiniGridEnv(gym.Env):\n \"\"\"\n 2D grid world game environment\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 10\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n # Toggle/activate an object\n toggle = 5\n\n # Done completing task\n done = 6\n\n def __init__(\n self,\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n see_through_walls=False,\n seed=1337,\n agent_view_size=7\n ):\n # Can't set both grid_size and width/height\n if grid_size:\n assert width == None and height == None\n width = grid_size\n height = grid_size\n\n # Action enumeration for this environment\n self.actions = MiniGridEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Number of cells (width and height) in the agent view\n assert agent_view_size % 2 == 1\n assert agent_view_size >= 3\n self.agent_view_size = agent_view_size\n\n # Observations are dictionaries containing an\n # encoding of the grid and a textual 'mission' string\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.agent_view_size, self.agent_view_size, 3),\n dtype='uint8'\n )\n self.observation_space = spaces.Dict({\n 'image': self.observation_space\n })\n\n # Range of possible rewards\n self.reward_range = (0, 1)\n\n # Window to use for human rendering mode\n self.window = None\n\n # Environment configuration\n self.width = width\n self.height = height\n self.max_steps = max_steps\n self.see_through_walls = see_through_walls\n\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Initialize the RNG\n self.seed(seed=seed)\n\n # Initialize the state\n self.reset()\n\n def reset(self):\n # Current position and direction of the agent\n self.agent_pos = None\n self.agent_dir = None\n\n # Generate a new random grid at the start of each episode\n # To keep the same grid for each episode, call env.seed() with\n # the same seed before calling env.reset()\n self._gen_grid(self.width, self.height)\n\n # These fields should be defined by _gen_grid\n assert self.agent_pos is not None\n assert self.agent_dir is not None\n\n # Check that the agent doesn't overlap 
with an object\n start_cell = self.grid.get(*self.agent_pos)\n assert start_cell is None or start_cell.can_overlap()\n\n # Item picked up, being carried, initially nothing\n self.carrying = None\n\n # Step count since episode start\n self.step_count = 0\n\n # Return first observation\n obs = self.gen_obs()\n return obs\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = seeding.np_random(seed)\n return [seed]\n\n def hash(self, size=16):\n \"\"\"Compute a hash that uniquely identifies the current state of the environment.\n :param size: Size of the hashing\n \"\"\"\n sample_hash = hashlib.sha256()\n\n to_encode = [self.grid.encode().tolist(), self.agent_pos, self.agent_dir]\n for item in to_encode:\n sample_hash.update(str(item).encode('utf8'))\n\n return sample_hash.hexdigest()[:size]\n\n @property\n def steps_remaining(self):\n return self.max_steps - self.step_count\n\n def __str__(self):\n \"\"\"\n Produce a pretty string of the environment's grid along with the agent.\n A grid cell is represented by 2-character string, the first one for\n the object and the second one for the color.\n \"\"\"\n\n # Map of object types to short string\n OBJECT_TO_STR = {\n 'wall' : 'W',\n 'floor' : 'F',\n 'door' : 'D',\n 'key' : 'K',\n 'ball' : 'A',\n 'box' : 'B',\n 'goal' : 'G',\n 'lava' : 'V',\n }\n\n # Short string for opened door\n OPENDED_DOOR_IDS = '_'\n\n # Map agent's direction to short string\n AGENT_DIR_TO_STR = {\n 0: '>',\n 1: 'V',\n 2: '<',\n 3: '^'\n }\n\n str = ''\n\n for j in range(self.grid.height):\n\n for i in range(self.grid.width):\n if i == self.agent_pos[0] and j == self.agent_pos[1]:\n str += 2 * AGENT_DIR_TO_STR[self.agent_dir]\n continue\n\n c = self.grid.get(i, j)\n\n if c == None:\n str += ' '\n continue\n\n if c.type == 'door':\n if c.is_open:\n str += '__'\n elif c.is_locked:\n str += 'L' + c.color[0].upper()\n else:\n str += 'D' + c.color[0].upper()\n continue\n\n str += OBJECT_TO_STR[c.type] + c.color[0].upper()\n\n if j < self.grid.height - 1:\n str += '\\n'\n\n return str\n\n def _gen_grid(self, width, height):\n assert False, \"_gen_grid needs to be implemented by each environment\"\n\n def _reward(self):\n \"\"\"\n Compute the reward to be given upon success\n \"\"\"\n\n return 1 - 0.9 * (self.step_count / self.max_steps)\n\n def _rand_int(self, low, high):\n \"\"\"\n Generate random integer in [low,high]\n \"\"\"\n\n return self.np_random.randint(low, high)\n\n def _rand_float(self, low, high):\n \"\"\"\n Generate random float in [low,high]\n \"\"\"\n\n return self.np_random.uniform(low, high)\n\n def _rand_bool(self):\n \"\"\"\n Generate random boolean value\n \"\"\"\n\n return (self.np_random.randint(0, 2) == 0)\n\n def _rand_elem(self, iterable):\n \"\"\"\n Pick a random element in a list\n \"\"\"\n\n lst = list(iterable)\n idx = self._rand_int(0, len(lst))\n return lst[idx]\n\n def _rand_subset(self, iterable, num_elems):\n \"\"\"\n Sample a random subset of distinct elements of a list\n \"\"\"\n\n lst = list(iterable)\n assert num_elems <= len(lst)\n\n out = []\n\n while len(out) < num_elems:\n elem = self._rand_elem(lst)\n lst.remove(elem)\n out.append(elem)\n\n return out\n\n def _rand_color(self):\n \"\"\"\n Generate a random color name (string)\n \"\"\"\n\n return self._rand_elem(COLOR_NAMES)\n\n def _rand_pos(self, xLow, xHigh, yLow, yHigh):\n \"\"\"\n Generate a random (x,y) position tuple\n \"\"\"\n\n return (\n self.np_random.randint(xLow, xHigh),\n self.np_random.randint(yLow, yHigh)\n )\n\n def place_obj(self,\n 
obj,\n top=None,\n size=None,\n reject_fn=None,\n max_tries=math.inf\n ):\n \"\"\"\n Place an object at an empty position in the grid\n\n :param top: top-left position of the rectangle where to place\n :param size: size of the rectangle where to place\n :param reject_fn: function to filter out potential positions\n \"\"\"\n\n if top is None:\n top = (0, 0)\n else:\n top = (max(top[0], 0), max(top[1], 0))\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),\n self._rand_int(top[1], min(top[1] + size[1], self.grid.height))\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Don't place the object where the agent is\n if np.array_equal(pos, self.agent_pos):\n continue\n\n # Check if there is a filtering criterion\n if reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos\n\n def put_obj(self, obj, i, j):\n \"\"\"\n Put an object at a specific position in the grid\n \"\"\"\n\n self.grid.set(i, j, obj)\n obj.init_pos = (i, j)\n obj.cur_pos = (i, j)\n\n def place_agent(\n self,\n top=None,\n size=None,\n rand_dir=True,\n max_tries=math.inf\n ):\n \"\"\"\n Set the agent's starting point at an empty position in the grid\n \"\"\"\n\n self.agent_pos = None\n pos = self.place_obj(None, top, size, max_tries=max_tries)\n self.agent_pos = pos\n\n if rand_dir:\n self.agent_dir = self._rand_int(0, 4)\n\n return pos\n\n @property\n def dir_vec(self):\n \"\"\"\n Get the direction vector for the agent, pointing in the direction\n of forward movement.\n \"\"\"\n\n assert self.agent_dir >= 0 and self.agent_dir < 4\n return DIR_TO_VEC[self.agent_dir]\n\n @property\n def right_vec(self):\n \"\"\"\n Get the vector pointing to the right of the agent.\n \"\"\"\n\n dx, dy = self.dir_vec\n return np.array((-dy, dx))\n\n @property\n def front_pos(self):\n \"\"\"\n Get the position of the cell that is right in front of the agent\n \"\"\"\n\n return self.agent_pos + self.dir_vec\n\n def get_view_coords(self, i, j):\n \"\"\"\n Translate and rotate absolute grid coordinates (i, j) into the\n agent's partially observable view (sub-grid). 
Note that the resulting\n coordinates may be negative or outside of the agent's view size.\n \"\"\"\n\n ax, ay = self.agent_pos\n dx, dy = self.dir_vec\n rx, ry = self.right_vec\n\n # Compute the absolute coordinates of the top-left view corner\n sz = self.agent_view_size\n hs = self.agent_view_size // 2\n tx = ax + (dx * (sz-1)) - (rx * hs)\n ty = ay + (dy * (sz-1)) - (ry * hs)\n\n lx = i - tx\n ly = j - ty\n\n # Project the coordinates of the object relative to the top-left\n # corner onto the agent's own coordinate system\n vx = (rx*lx + ry*ly)\n vy = -(dx*lx + dy*ly)\n\n return vx, vy\n\n def get_view_exts(self):\n \"\"\"\n Get the extents of the square set of tiles visible to the agent\n Note: the bottom extent indices are not included in the set\n \"\"\"\n\n # Facing right\n if self.agent_dir == 0:\n topX = self.agent_pos[0]\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing down\n elif self.agent_dir == 1:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1]\n # Facing left\n elif self.agent_dir == 2:\n topX = self.agent_pos[0] - self.agent_view_size + 1\n topY = self.agent_pos[1] - self.agent_view_size // 2\n # Facing up\n elif self.agent_dir == 3:\n topX = self.agent_pos[0] - self.agent_view_size // 2\n topY = self.agent_pos[1] - self.agent_view_size + 1\n else:\n assert False, \"invalid agent direction\"\n\n botX = topX + self.agent_view_size\n botY = topY + self.agent_view_size\n\n return (topX, topY, botX, botY)\n\n def relative_coords(self, x, y):\n \"\"\"\n Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates\n \"\"\"\n\n vx, vy = self.get_view_coords(x, y)\n\n if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:\n return None\n\n return vx, vy\n\n def in_view(self, x, y):\n \"\"\"\n check if a grid position is visible to the agent\n \"\"\"\n\n return self.relative_coords(x, y) is not None\n\n def agent_sees(self, x, y):\n \"\"\"\n Check if a non-empty grid position is visible to the agent\n \"\"\"\n\n coordinates = self.relative_coords(x, y)\n if coordinates is None:\n return False\n vx, vy = coordinates\n\n obs = self.gen_obs()\n obs_grid, _ = Grid.decode(obs['image'])\n obs_cell = obs_grid.get(vx, vy)\n world_cell = self.grid.get(x, y)\n\n return obs_cell is not None and obs_cell.type == world_cell.type\n\n def step(self, action):\n self.step_count += 1\n\n reward = -(1 / self.max_steps)\n done = False\n\n # Get the position in front of the agent\n fwd_pos = self.front_pos\n\n # Get the contents of the cell in front of the agent\n fwd_cell = self.grid.get(*fwd_pos)\n\n # Rotate left\n if action == self.actions.left:\n self.agent_dir -= 1\n if self.agent_dir < 0:\n self.agent_dir += 4\n\n # Rotate right\n elif action == self.actions.right:\n self.agent_dir = (self.agent_dir + 1) % 4\n\n # Move forward\n elif action == self.actions.forward:\n if fwd_cell == None or fwd_cell.can_overlap():\n self.agent_pos = fwd_pos\n if fwd_cell != None and fwd_cell.type == 'goal':\n done = True\n reward = self._reward()\n if fwd_cell != None and fwd_cell.type == 'lava':\n done = True\n\n # Pick up an object\n elif action == self.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if self.carrying is None:\n self.carrying = fwd_cell\n self.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n\n # Drop an object\n elif action == self.actions.drop:\n if not fwd_cell and self.carrying:\n self.grid.set(*fwd_pos, self.carrying)\n 
self.carrying.cur_pos = fwd_pos\n self.carrying = None\n\n # Toggle/activate an object\n elif action == self.actions.toggle:\n if fwd_cell:\n fwd_cell.toggle(self, fwd_pos)\n\n # Done action (not used by default)\n elif action == self.actions.done:\n pass\n\n else:\n assert False, \"unknown action\"\n\n if self.step_count >= self.max_steps:\n done = True\n\n obs = self.gen_obs()\n\n return obs, reward, done, {}\n\n def gen_obs_grid(self):\n \"\"\"\n Generate the sub-grid observed by the agent.\n This method also outputs a visibility mask telling us which grid\n cells the agent can actually see.\n \"\"\"\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n for i in range(self.agent_dir + 1):\n grid = grid.rotate_left()\n\n # Process occluders and visibility\n # Note that this incurs some performance cost\n if not self.see_through_walls:\n vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2 , self.agent_view_size - 1))\n else:\n vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)\n\n # Make it so the agent sees what it's carrying\n # We do this by placing the carried object at the agent's position\n # in the agent's partially observable view\n agent_pos = grid.width // 2, grid.height - 1\n if self.carrying:\n grid.set(*agent_pos, self.carrying)\n else:\n grid.set(*agent_pos, None)\n\n return grid, vis_mask\n\n def gen_obs(self):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n\n grid, vis_mask = self.gen_obs_grid()\n\n # Encode the partially observable view into a numpy array\n image = grid.encode(vis_mask)\n\n assert hasattr(self, 'mission'), \"environments must define a textual mission string\"\n\n # Observations are dictionaries containing:\n # - an image (partially observable view of the environment)\n # - the agent's direction/orientation (acting as a compass)\n # - a textual mission string (instructions for the agent)\n obs = {\n 'image': image,\n 'direction': self.agent_dir,\n 'mission': self.mission,\n 'target_cell': self.target_cell,\n 'obj_descs': self.obj_descs\n }\n\n return obs\n\n def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):\n \"\"\"\n Render an agent observation for visualization\n \"\"\"\n\n grid, vis_mask = Grid.decode(obs)\n\n # Render the whole grid\n img = grid.render(\n tile_size,\n agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),\n agent_dir=3,\n highlight_mask=vis_mask\n )\n\n return img\n\n def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n if mode == 'human' and not self.window:\n import gym_minigrid.window\n self.window = gym_minigrid.window.Window('gym_minigrid')\n self.window.show(block=False)\n\n # Compute which cells are visible to the agent\n _, vis_mask = self.gen_obs_grid()\n\n # Compute the world coordinates of the bottom-left corner\n # of the agent's view area\n f_vec = self.dir_vec\n r_vec = self.right_vec\n top_left = self.agent_pos + f_vec * (self.agent_view_size-1) - r_vec * (self.agent_view_size // 2)\n\n # Mask of which cells to highlight\n highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)\n\n # For each cell in the visibility mask\n for vis_j in range(0, self.agent_view_size):\n for vis_i in range(0, self.agent_view_size):\n # If this cell is not visible, don't highlight it\n if not 
vis_mask[vis_i, vis_j]:\n continue\n\n # Compute the world coordinates of this cell\n abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)\n\n if abs_i < 0 or abs_i >= self.width:\n continue\n if abs_j < 0 or abs_j >= self.height:\n continue\n\n # Mark this cell to be highlighted\n highlight_mask[abs_i, abs_j] = True\n\n # Render the whole grid\n img = self.grid.render(\n tile_size,\n self.agent_pos,\n self.agent_dir,\n highlight_mask=highlight_mask if highlight else None\n )\n\n if mode == 'human':\n self.window.set_caption(self.mission)\n self.window.show_img(img)\n\n return img\n\n def close(self):\n if self.window:\n self.window.close()\n return\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.array_equal",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
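
As a hedged aside on this record's `apis` entries: `minigrid.py` encodes each grid cell as a `(type, color, state)` triple in a `(width, height, 3)` uint8 array (`Grid.encode`) and compares grids with `numpy.array_equal` (`Grid.__eq__`). The sketch below shows that encoding pattern on a toy grid; the index values are copied from the mappings in the file, everything else is illustrative.

```python
# Minimal sketch: the compact (type, color, state) cell encoding used by Grid.encode.
import numpy as np

OBJECT_TO_IDX = {'empty': 1, 'wall': 2, 'goal': 8}   # subset of the file's mapping
COLOR_TO_IDX = {'green': 1, 'grey': 5}

width, height = 5, 5
array = np.zeros((width, height, 3), dtype='uint8')
array[..., 0] = OBJECT_TO_IDX['empty']               # default every cell to 'empty'

array[0, :, 0] = OBJECT_TO_IDX['wall']               # grey wall along one edge
array[0, :, 1] = COLOR_TO_IDX['grey']
array[4, 4] = np.array([OBJECT_TO_IDX['goal'], COLOR_TO_IDX['green'], 0])

# Grid.__eq__ reduces to an element-wise comparison of two such encodings
print(np.array_equal(array, array.copy()))           # True
```
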
jamesxiu/Oystermaran2021
|
[
"f3703bb220dc5b415942f90bd5761a9985381067"
] |
[
"bag_detection-master/scripts/util.py"
] |
[
"#!/usr/bin/env python3\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nimport sys\nsys.path.append(\"/home/oyster/Tensorflow/Monk_Object_Detection/13_tf_obj_2/lib/\")\nfrom infer_detector_nano import Infer\n\nfrom bag_detection.msg import FlipPos, PathPos\n\n\ndef get_rectangles(mask, threshold_area):\n \"\"\"\n Extract defined color from image and return rectangles coordinates of large enough contours on given side\n Input: \n mask: Binary Image\n threshold_area: int\n Output:\n list of 1x4 tuples (x, y, w, h) of color blobs \n \"\"\"\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rectangles = []\n for contour in contours:\n if cv2.contourArea(contour) > threshold_area:\n rect = cv2.boundingRect(contour)\n rectangles.append(rect)\n return rectangles\n\n\ndef get_contours(mask, threshold_area):\n \"\"\"\n Extract defined color from image and return large contours (UNUSED)\n Input: \n cv_image: Image (BGR)\n lower_range: 1x3 tuple representing lower HSV for target color\n upper_range: 1x3 tuple representing upper HSV for target color\n threshold_area: int\n Output:\n list of openCV contours \n \"\"\"\n contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n return [x for x in contours if cv2.contourArea(x) > threshold_area], hierarchy\n\n\n\ndef color_segmentation(image, lower, upper):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, np.array(lower), np.array(upper))\n return mask\n\n\ndef get_mask_pixels(mask):\n return np.transpose((mask>0).nonzero())\n\n\ndef get_avg_depth(depth_img, pixels, low_thres=0, high_thres=1000):\n avg_depth = 0\n i = 0\n for x,y in pixels:\n depth = depth_img[x][y]\n # print(depth)\n if depth > low_thres and depth < high_thres: \n avg_depth += depth\n i += 1\n\n return avg_depth/i\n\n\ndef get_region_box(mask, area=100, side='bottom', image=None):\n left = mask.shape[1]\n right = 0\n top = mask.shape[0]\n bot = 0\n box = None\n\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n if cv2.contourArea(contour) > area:\n rect = cv2.boundingRect(contour)\n if image:\n tl = (rect[0], rect[1])\n br = (rect[0]+rect[2], rect[1]+rect[3])\n cv2.rectangle(image, tl, br, (255,0,0), 2)\n if side == 'left':\n if rect[0] < left:\n left = rect[0]\n box = rect\n elif side == 'right':\n if rect[0] > right:\n right = rect[0]\n box = rect\n elif side == 'top':\n if rect[1] < top:\n top = rect[1]\n box = rect\n else:\n if rect[1] > bot:\n bot = rect[1]\n box = rect\n if image:\n cv2.rectangle(image, (box[0], box[1]), (box[0]+box[2], box[1]+box[3]), (0,0,255), 2)\n return box\n\n\ndef get_tf2_detect_fn(path):\n detect_fn=tf.saved_model.load(path)\n return detect_fn\n\ndef detect_objects(detect_fn, image, width=1280, height=720, min_score_thres=0.5):\n image_np = np.array(image)\n input_tensor=tf.convert_to_tensor(image_np)\n input_tensor=input_tensor[tf.newaxis, ...]\n detections=detect_fn(input_tensor)\n print(type(detections))\n\n # This is the way I'm getting my coordinates\n boxes = detections['detection_boxes'][0]\n # print(boxes)\n # get all boxes from an array\n max_boxes_to_draw = boxes.shape[0]\n # get scores to get a threshold\n scores = detections['detection_scores'][0]\n # print(scores)\n # this is set as a default but feel free to adjust it to your needs\n \n # iterate over all objects found\n objects = []\n for i in range(min(max_boxes_to_draw, boxes.shape[0])): \n if scores is None or 
scores[i] > min_score_thres:\n class_name = detections['detection_classes'][0][i].numpy()\n\n y_min, x_min, y_max, x_max = boxes[i].numpy()\n tl, br = ((int(x_min*width), int(y_min*height)), (int(x_max*width), int(y_max*height)))\n detection = {'class':class_name, 'box': (tl, br)}\n objects.append(detection)\n\n return objects\n\n\ndef get_gtf():\n gtf = Infer();\n print(\"GTFF INITIALIZEDDDDDDDDDDDDDDDDDDDDDDDDDDD\")\n gtf.set_dataset_params(class_list_file = '/home/oyster/Tensorflow/oyster_bag/classes.txt')\n print(\"DATA SET PARAMMMS SETTTTTT\")\n gtf.set_model_params(exported_model_dir = '/home/oyster/Tensorflow/trt_fp16_dir')\n\n return gtf\n\n\ndef gtf_detect_objects(gtf, image_np, min_score_thres=0.5, width=1280, height=720):\n input_tensor = tf.convert_to_tensor(image_np)\n input_tensor = input_tensor[tf.newaxis, ...]\n scores, bboxes, labels = gtf.infer_on_tensor(input_tensor, thresh=0.8);\n \n return bboxes\n\n\ndef get_element(dilation_size, dilation_shape=cv2.MORPH_RECT):\n return cv2.getStructuringElement(dilation_shape, (2 * dilation_size + 1, 2 * dilation_size + 1),\n (dilation_size, dilation_size))\n\ndef canny(img, thres1=100, thres2=200, aperture=1):\n return cv2.Canny(img, thres1, thres2, aperture)\n\n\ndef dilate_bag_row(edges, element):\n return cv2.morphologyEx(edges, cv2.MORPH_CLOSE, element)\n\n\ndef directional_shear(closed, element, vertical=1, shearing_factor=50, shape=cv2.MORPH_RECT):\n # dims = closed.shape[1]\n size = (closed.shape[1] // shearing_factor, 1)\n\n if (vertical):\n # dims = closed.shape[0]\n size = (1, closed.shape[0] // shearing_factor)\n\n structure = cv2.getStructuringElement(shape, size)\n closed = cv2.erode(closed, structure)\n closed = cv2.dilate(closed, structure)\n return cv2.morphologyEx(closed, cv2.MORPH_CLOSE, element)\n\n\ndef bag_rect_detection(img, vertical=1, threshold_area_prop = 0.025, dilation_size=9, dilation_shape=cv2.MORPH_RECT, thres1=100, thres2=200, aperture=1, shearing_factor=50):\n element = get_element(dilation_size, dilation_shape)\n edges = canny(img, thres1, thres2, aperture)\n closed = dilate_bag_row(edges, element)\n closed = directional_shear(closed, element, vertical, shearing_factor, dilation_shape)\n h, w = img.shape[:2]\n threshold_area = threshold_area_prop*h*w \n c_rects = get_rectangles(closed, threshold_area)\n\n return c_rects\n\n\ndef create_flip_pos_msg(top=False, bot=False):\n\n msg = FlipPos()\n msg.top = top\n msg.bot = bot\n msg.top_x = float('inf')\n msg.top_y = float('inf')\n msg.bot_x = float('inf')\n msg.bot_y = float('inf')\n\n return msg\n"
] |
[
[
"tensorflow.convert_to_tensor",
"numpy.array",
"tensorflow.saved_model.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
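
As a hedged aside on this record's `apis` entries: `util.py` loads an exported TF2 detection model with `tensorflow.saved_model.load` and feeds it images via `tensorflow.convert_to_tensor`. The sketch below isolates that inference pattern; the model path is a placeholder and the blank image is a stand-in for a camera frame.

```python
# Minimal sketch (placeholder path, dummy image): running a TF2 SavedModel detector
# the way detect_objects() in this record does.
import numpy as np
import tensorflow as tf

detect_fn = tf.saved_model.load('/path/to/exported_model/saved_model')  # placeholder

image_np = np.zeros((720, 1280, 3), dtype=np.uint8)   # stand-in for a 1280x720 frame
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]           # add batch dimension

detections = detect_fn(input_tensor)
boxes = detections['detection_boxes'][0]               # normalized [ymin, xmin, ymax, xmax]
scores = detections['detection_scores'][0]
kept = [i for i in range(boxes.shape[0]) if float(scores[i]) > 0.5]
```
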
chloeyutianyi/pytorch
|
[
"6a085648d81ce88ff59d6d1438fdb3707a0d6fb7",
"6a085648d81ce88ff59d6d1438fdb3707a0d6fb7"
] |
[
"test/quantization/core/test_workflow_ops.py",
"torch/fx/experimental/graph_gradual_typechecker.py"
] |
[
"import torch\nfrom torch.quantization import (\n FakeQuantize,\n MovingAverageMinMaxObserver,\n default_observer,\n default_affine_fixed_qparams_fake_quant,\n)\n\nfrom torch.quantization._learnable_fake_quantize import _LearnableFakeQuantize\nfrom torch.testing._internal.common_quantized import (\n _fake_quantize_per_channel_affine_reference,\n _fake_quantize_per_channel_affine_grad_reference,\n to_tensor,\n)\nimport torch.nn as nn\n\n# Standard library\nimport io\nimport itertools\nimport unittest\nimport numpy as np\n\n# Testing utils\nfrom hypothesis import given\nfrom hypothesis import strategies as st\nimport torch.testing._internal.hypothesis_utils as hu\nhu.assert_deadline_disabled()\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nfrom torch.testing._internal.common_utils import TestCase\n\n# Reference method for fake quantize\n# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64\ndef _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):\n dtype = X.dtype\n res = ((torch.clamp(torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point), quant_min, quant_max) - zero_point) * scale)\n return res.to(dtype)\n\n# Reference method for the gradient of the fake quantize operator\n# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64\ndef _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max):\n dtype = X.dtype\n Xq = torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point)\n mask = (Xq >= quant_min) * (Xq <= quant_max)\n res = torch.zeros_like(dY)\n res[mask] = dY[mask]\n return res.to(dtype)\n\n# Reference method for the gradients of the fake quantize operator\ndef _fake_quantize_learnable_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max, device):\n r\"\"\"This method references the following literatures for back propagation on scale and zero point.\n - https://arxiv.org/pdf/1902.08153.pdf\n - https://arxiv.org/pdf/1903.08066.pdf\n \"\"\"\n zero_point_rounded = int((zero_point + 0.5).clamp(quant_min, quant_max).item())\n Xq = torch.round(X * (1.0 / scale) + zero_point_rounded)\n\n indicate_small_scale = (Xq < quant_min).float().to(device)\n indicate_big_scale = (Xq > quant_max).float().to(device)\n indicate_middle_scale = torch.ones(indicate_small_scale.shape).to(device) - \\\n indicate_small_scale - indicate_big_scale\n\n indicate_saturate_zp = ((Xq < quant_min).float() + (Xq > quant_max).float()).to(device)\n indicate_unsaturate_zp = torch.ones(indicate_saturate_zp.shape).to(device) - indicate_saturate_zp\n\n Xq = Xq.clamp(quant_min, quant_max)\n Xfq = (Xq - zero_point_rounded) * scale\n\n grad_small_scale = quant_min - zero_point_rounded\n grad_big_scale = quant_max - zero_point_rounded\n grad_middle_scale = ((Xfq - X) / scale).to(device)\n\n grad_saturate_zp = -scale.to(device)\n grad_unsaturate_zp = 0\n\n grad_scale = indicate_small_scale * grad_small_scale + \\\n indicate_big_scale * grad_big_scale + \\\n indicate_middle_scale * grad_middle_scale\n grad_zp = indicate_saturate_zp * grad_saturate_zp + \\\n indicate_unsaturate_zp * grad_unsaturate_zp\n grad_X = _fake_quantize_per_tensor_affine_grad_reference(\n dY, X, scale, zero_point, quant_min, quant_max).to(device)\n\n grad_scale = (grad_scale * dY).sum().unsqueeze(dim=0)\n grad_zp = (grad_zp * dY).sum().unsqueeze(dim=0)\n return grad_X, grad_scale, grad_zp\n\n\n# Reference 
method for quantization.\ndef _quantize_per_tensor(x, scale, zero_point, quant_min, quant_max):\n return ((x / scale) + zero_point).round().clamp(quant_min, quant_max)\n\n# Reference method for the per channel gradients of the learnable fake quantize operator\ndef _fake_quantize_learnable_per_channel_affine_grad_reference(\n dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max, device):\n r\"\"\"This method references the following literatures for back propagation on scale and zero point.\n - https://arxiv.org/pdf/1902.08153.pdf\n - https://arxiv.org/pdf/1903.08066.pdf\n \"\"\"\n per_channel_zero_point = ((per_channel_zero_point.detach() + 0.5).clamp(quant_min, quant_max)).type(torch.int32)\n grad_X = _fake_quantize_per_channel_affine_grad_reference(\n dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max).to(device)\n per_channel_scale = per_channel_scale.detach().type(torch.float)\n\n grad_scale = torch.zeros([per_channel_scale.size(0)]).to(device)\n grad_zero_point = torch.zeros([per_channel_zero_point.size(0)]).to(device)\n\n X_flattened = torch.unbind(X, dim=axis)\n dY_flattened = torch.unbind(dY, dim=axis)\n\n for i, X_i in enumerate(torch.unbind(X, dim=axis), 0):\n scale_i = per_channel_scale[i]\n zero_point_i = per_channel_zero_point[i]\n X_i = X_flattened[i]\n dY_i = dY_flattened[i]\n\n Xq_i = ((X_i / scale_i) + zero_point_i).round()\n Xfq_i = (Xq_i - zero_point_i) * scale_i\n\n indicate_small_scale_i = (Xq_i < quant_min).float().to(device)\n indicate_big_scale_i = (Xq_i > quant_max).float().to(device)\n indicate_middle_scale_i = torch.ones(indicate_small_scale_i.shape).to(device) - \\\n indicate_small_scale_i - indicate_big_scale_i\n\n indicate_saturate_zp_i = ((Xq_i < quant_min).float() +\n (Xq_i > quant_max).float()).to(device)\n indicate_unsaturate_zp_i = torch.ones(indicate_saturate_zp_i.shape).to(device) - \\\n indicate_saturate_zp_i\n\n Xq_i = Xq_i.clamp(quant_min, quant_max)\n Xfq_i = (Xq_i - zero_point_i) * scale_i\n\n grad_small_scale_i = quant_min - zero_point_i\n grad_big_scale_i = quant_max - zero_point_i\n grad_middle_scale_i = ((Xfq_i - X_i) / scale_i).to(device)\n\n grad_saturate_zp_i = -scale_i.to(device)\n grad_unsaturate_zp_i = 0\n\n grad_scale_i = indicate_small_scale_i * grad_small_scale_i + \\\n indicate_middle_scale_i * grad_middle_scale_i + \\\n indicate_big_scale_i * grad_big_scale_i\n grad_zp_i = indicate_saturate_zp_i * grad_saturate_zp_i + \\\n indicate_unsaturate_zp_i * grad_unsaturate_zp_i\n\n grad_scale_i = (grad_scale_i * dY_i).sum().unsqueeze(dim=0)\n grad_zp_i = (grad_zp_i * dY_i).sum().unsqueeze(dim=0)\n\n grad_scale[i] = grad_scale_i\n grad_zero_point[i] = grad_zp_i\n return grad_X, grad_scale, grad_zero_point\n\nNP_RANDOM_SEED = 19\ntolerance = 1e-6\n\nclass TestFakeQuantizeOps(TestCase):\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_forward_per_tensor(self, device, X):\n r\"\"\"Tests the forward path of the FakeQuantizePerTensorAffine op.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n np.testing.assert_allclose(Y, 
Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skip(\"temporarily disable the test\")\n def test_backward_per_tensor(self, device, X):\n r\"\"\"Tests the backward method.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def test_forward_backward_per_tensor_with_amp(self):\n net = nn.Sequential(nn.Conv2d(1, 1, 3))\n net.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n net_prep = torch.quantization.prepare_qat(net)\n\n with torch.cuda.amp.autocast():\n x = torch.randn(4, 1, 5, 5)\n out = net_prep(x).sum()\n out.backward()\n self.assertTrue(net_prep[0].weight.grad is not None)\n\n def test_forward_per_tensor_half_precision_numerics(self):\n scale = .1\n zero = 0\n maxi = 255\n mini = 0\n\n for i in range(20):\n X1 = torch.randn(5, 5).to(torch.float16)\n Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)\n Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y1, Y1r, rtol=tolerance, atol=tolerance))\n\n # to force overflow\n X2 = torch.tensor(2**15 + .01).to(torch.float16)\n Y2 = torch.fake_quantize_per_tensor_affine(X2, scale, zero, mini, maxi)\n Y2r = _fake_quantize_per_tensor_affine_reference(X2, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y2, Y2r, rtol=tolerance, atol=tolerance))\n\n scale = 10\n\n # to force underflow\n X3 = torch.tensor(2**-24).to(torch.float16)\n Y3 = torch.fake_quantize_per_tensor_affine(X3, scale, zero, mini, maxi)\n Y3r = _fake_quantize_per_tensor_affine_reference(X3, scale, zero, mini, maxi)\n self.assertTrue(torch.allclose(Y3, Y3r, rtol=tolerance, atol=tolerance))\n\n def _test_forward_per_tensor_cachemask_impl(self, device):\n float_types = (torch.float32, torch.float16, torch.float64)\n torch_types = (torch.qint8, torch.quint8)\n Xs = (torch.randn(4, 8, device=device), torch.randn(4, 16, device=device)[:, ::2])\n tensor_qparam = (True, False)\n for float_type, torch_type, X, tensor_qparams in itertools.product(float_types, torch_types, Xs, tensor_qparam):\n # pick the scale + zp so that some values get clipped\n X = X.to(float_type)\n obs = torch.quantization.MinMaxObserver(torch_type)\n obs.to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n quant_min, quant_max = obs._calculate_qmin_qmax()\n if not tensor_qparam:\n scale, zero_point = float(scale), int(zero_point)\n Y_test = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n Y_ref = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n self.assertTrue(torch.allclose(Y_test, Y_ref, rtol=tolerance, atol=tolerance))\n self.assertTrue(Y_test.dtype == 
float_type)\n\n def test_forward_per_tensor_cachemask_cpu(self):\n device = torch.device('cpu')\n self._test_forward_per_tensor_cachemask_impl(device)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_forward_per_tensor_cachemask_cuda(self):\n device = torch.device('cuda')\n self._test_forward_per_tensor_cachemask_impl(device)\n\n def _test_backward_per_tensor_cachemask_impl(self, device):\n float_types = (torch.float32, torch.float16, torch.float64)\n torch_types = (torch.qint8, torch.quint8)\n tensor_qparam = (True, False)\n for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):\n X = torch.randn(4, 8).to(device).to(float_type)\n X.requires_grad_()\n # pick the scale + zp so that some values get clipped\n obs = torch.quantization.MinMaxObserver(torch_type)\n obs.to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n if not tensor_qparam:\n scale, zero_point = float(scale), int(zero_point)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n\n # forward pass\n Y_test = torch.fake_quantize_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max)\n Y_ref = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n self.assertTrue(torch.allclose(Y_test, Y_ref, rtol=tolerance, atol=tolerance))\n\n # backward pass\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max)\n Y_test.backward(dout)\n self.assertTrue(torch.allclose(dX, X.grad))\n self.assertTrue(X.grad.dtype == float_type)\n\n def test_backward_per_tensor_cachemask_cpu(self):\n device = torch.device('cpu')\n self._test_backward_per_tensor_cachemask_impl(device)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_backward_per_tensor_cachemask_cuda(self):\n device = torch.device('cuda')\n self._test_backward_per_tensor_cachemask_impl(device)\n\n def _test_learnable_forward_per_tensor(self, X, device, scale_base, zero_point_base):\n X_base = torch.tensor(X).to(device)\n\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n X = X_base.clone().float()\n scale_base = scale_base.to(device).float()\n zero_point_base = zero_point_base.to(dtype=torch.int32, device=device)\n scale = scale_base.clone()\n zero_point = zero_point_base.clamp(quant_min, quant_max)\n\n Y = _fake_quantize_per_tensor_affine_reference(\n X, scale, zero_point, quant_min, quant_max).to(device)\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)\n self.assertTrue(\n torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),\n \"Expected kernel forward function to have results match the reference forward function\")\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_forward_per_tensor_cpu(self, X):\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_forward_per_tensor(\n X, 'cpu', scale_base, zero_point_base)\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not 
TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_forward_per_tensor_cuda(self, X):\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_forward_per_tensor(\n X, 'cuda', scale_base, zero_point_base)\n\n def _test_learnable_backward_per_tensor(self, X, device, scale_base, zero_point_base):\n r\"\"\"Tests the backward method with additional backprop support for scale and zero point.\n \"\"\"\n X_base = torch.tensor(X).to(device)\n\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n X = X_base.clone().float().to(device)\n X.requires_grad_()\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device)\n scale = scale_base.clone()\n scale.requires_grad_()\n zero_point = zero_point_base.clone().clamp(quant_min, quant_max)\n zero_point.requires_grad_()\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_tensor_affine(\n X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(\n dout, X, scale, zero_point, quant_min, quant_max, device)\n Y_prime.backward(dout)\n\n expected_dX = dX.to(device).detach()\n actual_dX = X.grad.to(device).detach()\n expected_dScale = dScale.to(device).detach()\n actual_dScale = scale.grad.to(device).detach()\n expected_dZeroPoint = dZeroPoint.to(device).detach()\n actual_dZeroPoint = zero_point.grad.to(device).detach()\n\n self.assertTrue(\n torch.allclose(\n expected_dX, actual_dX, rtol=tolerance, atol=tolerance),\n \"Expected dX to match X.grad\")\n self.assertTrue(\n torch.allclose(\n expected_dScale * grad_factor, actual_dScale, rtol=tolerance, atol=tolerance),\n \"Expected dScale to match scale.grad\")\n self.assertTrue(\n torch.allclose(\n expected_dZeroPoint * grad_factor, actual_dZeroPoint, rtol=tolerance, atol=tolerance),\n \"Expected dZeroPoint to match zero_point.grad\")\n X.grad.data.zero_()\n scale.grad.data.zero_()\n zero_point.grad.data.zero_()\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_backward_per_tensor_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_backward_per_tensor(\n X, 'cpu', scale_base, zero_point_base)\n\n @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_backward_per_tensor_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, _) = X\n scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(1,))\n self._test_learnable_backward_per_tensor(\n X, 'cuda', scale_base, zero_point_base)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=[torch.quint8])),\n )\n def test_fq_module_per_tensor(self, device, X):\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, 
torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n fq_module = torch.quantization.default_fake_quant().to(device)\n Y_prime = fq_module(X)\n assert fq_module.scale is not None\n assert fq_module.zero_point is not None\n Y = _fake_quantize_per_tensor_affine_reference(X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)\n np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n # Test backward\n dout = torch.rand_like(X, dtype=torch.float, device=device)\n Y_prime.backward(dout)\n dX = _fake_quantize_per_tensor_affine_grad_reference(dout, X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)\n np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_fixed_qparams_fq_module(self, device, X):\n X, (scale, zero_point, torch_type) = X\n X = to_tensor(X, device)\n fq_module = default_affine_fixed_qparams_fake_quant()\n fq_module.to(device)\n fixed_scale = fq_module.scale.clone()\n fixed_zero_point = fq_module.zero_point.clone()\n # run fq module and make sure the quantization parameters does not change\n torch.quantization.enable_observer(fq_module)\n fq_module(X)\n self.assertEqual(fixed_scale, fq_module.scale)\n self.assertEqual(fixed_zero_point, fq_module.zero_point)\n\n def test_fq_serializable_per_tensor(self):\n observer = default_observer\n quant_min = 0\n quant_max = 255\n for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:\n fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)\n y_ref = fq_module(X)\n state_dict = fq_module.state_dict()\n self.assertEqual(state_dict['scale'], 0.094488)\n self.assertEqual(state_dict['zero_point'], 53)\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n loaded_fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n loaded_fq_module.load_state_dict(loaded_dict)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_fq_module.state_dict()[key])\n\n self.assertEqual(loaded_fq_module.calculate_qparams(), fq_module.calculate_qparams())\n\n def test_fake_quant_control(self):\n for fq_module in [torch.quantization.default_fake_quant(),\n _LearnableFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0,\n quant_max=255,\n dtype=torch.quint8, qscheme=torch.per_tensor_affine,\n reduce_range=True)()]:\n torch.manual_seed(42)\n X = torch.rand(20, 10, dtype=torch.float32)\n # Output of fake quant is not identical to input\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_fake_quant(False)\n else:\n torch.quantization.disable_fake_quant(fq_module)\n X = torch.rand(20, 10, dtype=torch.float32)\n Y = fq_module(X)\n # Fake quant is disabled,output is identical to input\n self.assertEqual(Y, X)\n\n # Explicit copy at this point in time, because FakeQuant keeps internal\n # state in mutable buffers.\n scale = fq_module.scale.clone().detach()\n zero_point = fq_module.zero_point.clone().detach()\n\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_observer_update(False)\n fq_module.toggle_fake_quant(True)\n 
else:\n torch.quantization.disable_observer(fq_module)\n torch.quantization.enable_fake_quant(fq_module)\n X = 10.0 * torch.rand(20, 10, dtype=torch.float32) - 5.0\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n # Observer is disabled, scale and zero-point do not change\n self.assertEqual(fq_module.scale, scale)\n self.assertEqual(fq_module.zero_point, zero_point)\n if type(fq_module) == _LearnableFakeQuantize:\n fq_module.toggle_observer_update(True)\n else:\n torch.quantization.enable_observer(fq_module)\n Y = fq_module(X)\n self.assertNotEqual(Y, X)\n # Observer is enabled, scale and zero-point are different\n self.assertNotEqual(fq_module.scale, scale)\n self.assertNotEqual(fq_module.zero_point, zero_point)\n\n def test_fake_quant_preserves_qparam_shapes_for_activations(self):\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = nn.Linear(4, 4)\n\n def forward(self, x):\n x = self.linear(x)\n return x\n\n m = Model()\n\n m.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n torch.quantization.prepare_qat(m, inplace=True)\n\n scale_shape_before = m.linear.activation_post_process.scale.shape\n zero_point_shape_before = m.linear.activation_post_process.zero_point.shape\n\n x = torch.rand(4, 4, 4, 4)\n m(x)\n scale_shape_after = m.linear.activation_post_process.scale.shape\n zero_point_shape_after = m.linear.activation_post_process.zero_point.shape\n self.assertEqual(\n scale_shape_before, scale_shape_after,\n msg=\"FakeQuant scale shape must stay consistent\")\n self.assertEqual(\n zero_point_shape_before, zero_point_shape_after,\n msg=\"FakeQuant zero_point shape must stay consistent\")\n\n def fake_quant_scriptable(self):\n observer = default_observer\n quant_min = 0\n quant_max = 255\n for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:\n fq_module = FakeQuantizeClass(observer, quant_min, quant_max)\n scripted_module = torch.jit.script(fq_module)\n\n X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)\n\n fq_module(X)\n scripted_module(X)\n self.assertEqual(fq_module.calculate_qparams(), scripted_module.calculate_qparams())\n\n buf = io.BytesIO()\n torch.jit.save(scripted_module, buf)\n buf.seek(0)\n loaded_module = torch.jit.load(buf)\n self.assertEqual(fq_module.calculate_qparams(), loaded_module.calculate_qparams())\n\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_forward_per_channel(self, device, X):\n r\"\"\"Tests the forward path of the FakeQuantizePerTensorAffine op.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n scale = to_tensor(scale, device)\n zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)\n Y = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n\n def _test_forward_per_channel_cachemask_impl(self, device):\n torch_types = (torch.qint8, torch.quint8)\n float_types = (torch.float32, torch.float16, torch.float64)\n for torch_type, float_type in itertools.product(torch_types, float_types):\n X = torch.randn(1, 2, 4, 
4, dtype=float_type).to(device)\n # pick the scale + zp so that some values get clipped\n axis = 1\n obs = torch.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n # TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast\n zero_point = zero_point.to(torch.int32)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n\n Y = _fake_quantize_per_channel_affine_reference(\n X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)\n self.assertTrue(Y.dtype == float_type)\n\n def test_forward_per_channel_cachemask_cpu(self):\n self._test_forward_per_channel_cachemask_impl('cpu')\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_forward_per_channel_cachemask_cuda(self):\n self._test_forward_per_channel_cachemask_impl('cuda')\n\n def test_forward_per_channel_half_precision_numerics(self):\n scale = torch.randn(5).abs()\n zero = torch.randn(5).to(dtype=torch.int)\n axis = 1\n mini = 0\n maxi = 255\n\n for i in range(20):\n X1 = torch.randn(4, 5).to(torch.float16)\n Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)\n Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y1, Y1r, rtol=tolerance, atol=tolerance))\n\n # to force overflow\n X2 = torch.randn(4, 5).to(torch.float16)\n X2[0, 0] = 2**15 + .01\n Y2 = torch.fake_quantize_per_channel_affine(X2, scale, zero, axis, mini, maxi)\n Y2r = _fake_quantize_per_channel_affine_reference(X2, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y2, Y2r, rtol=tolerance, atol=tolerance))\n\n scale = torch.zeros(5) + 10\n\n # to force underflow\n X3 = torch.randn(4, 5).to(torch.float16)\n X3[0, 0] = 2**-24\n Y3 = torch.fake_quantize_per_channel_affine(X3, scale, zero, axis, mini, maxi)\n Y3r = _fake_quantize_per_channel_affine_reference(X3, scale, zero, axis, mini, maxi)\n self.assertTrue(torch.allclose(Y3, Y3r, rtol=tolerance, atol=tolerance))\n\n def _test_learnable_forward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):\n r\"\"\"Tests the forward path of the learnable FakeQuantizePerTensorAffine op.\n \"\"\"\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** (n_bits) - 1\n\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device)\n\n X_curr = X_base.clone()\n scale_curr = scale_base.clone()\n zero_point_curr = zero_point_base.clone()\n\n Y = _fake_quantize_per_channel_affine_reference(\n X_curr, scale_curr, zero_point_curr.round().clamp(quant_min, quant_max), axis, quant_min, quant_max).to(device)\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_channel_affine(\n X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)\n self.assertTrue(\n torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),\n \"Expected kernel forward function to have results match the reference forward function\")\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_forward_per_channel_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cpu')\n channel_size = X_base.size(axis)\n scale_base = 
torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_forward_per_channel(\n X_base, 'cpu', scale_base, zero_point_base, axis)\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_forward_per_channel_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cuda')\n channel_size = X_base.size(axis)\n scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_forward_per_channel(\n X_base, 'cuda', scale_base, zero_point_base, axis)\n\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_backward_per_channel(self, device, X):\n r\"\"\"Tests the backward method.\n \"\"\"\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n scale = to_tensor(scale, device)\n zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)\n X.requires_grad_()\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=torch.float).to(device)\n dX = _fake_quantize_per_channel_affine_grad_reference(\n dout, X, scale, zero_point, axis, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def _test_backward_per_channel_cachemask_impl(self, device):\n torch_types = (torch.qint8, torch.quint8)\n float_types = (torch.float32, torch.float16, torch.float64)\n for torch_type, float_type in itertools.product(torch_types, float_types):\n X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)\n # pick the scale + zp so that some values get clipped\n axis = 1\n obs = torch.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)\n obs(X * 0.75)\n scale, zero_point = obs.calculate_qparams()\n # TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast\n zero_point = zero_point.to(torch.int32)\n quant_min, quant_max = obs._calculate_qmin_qmax()\n X.requires_grad_()\n Y_prime = torch.fake_quantize_per_channel_affine(\n X, scale, zero_point, axis, quant_min, quant_max)\n dout = torch.rand_like(X, dtype=float_type).to(device)\n dX = _fake_quantize_per_channel_affine_grad_reference(\n dout, X, scale, zero_point, axis, quant_min, quant_max)\n Y_prime.backward(dout)\n np.testing.assert_allclose(\n dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n assert(X.grad.dtype == float_type)\n\n\n def test_backward_per_channel_cachemask_cpu(self):\n self._test_backward_per_channel_cachemask_impl('cpu')\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_backward_per_channel_cachemask_cuda(self):\n self._test_backward_per_channel_cachemask_impl('cuda')\n\n def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):\n r\"\"\"Tests the backward path of the learnable FakeQuantizePerTensorAffine op.\n 
\"\"\"\n for n_bits in (4, 8):\n quant_min, quant_max = 0, 2 ** n_bits - 1\n\n scale_base = scale_base.to(device)\n zero_point_base = zero_point_base.to(device=device)\n\n X_curr = X_base.clone()\n X_curr.requires_grad_()\n scale_curr = scale_base.clone()\n scale_curr.requires_grad_()\n zero_point_curr = zero_point_base.clone()\n zero_point_curr.requires_grad_()\n\n for grad_factor in [0.1, 1.0, 10.0]:\n Y_prime = torch._fake_quantize_learnable_per_channel_affine(\n X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)\n\n dout = torch.rand(X_curr.shape, dtype=torch.float).to(device)\n dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(\n dout, X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, device)\n Y_prime.backward(dout)\n\n dX_expected = dX.to(device).detach()\n dX_actual = X_curr.to(device).grad.detach()\n dScale_expected = dScale.to(device).detach()\n dScale_actual = scale_curr.to(device).grad.detach()\n dZeroPoint_expected = dZeroPoint.to(device).detach()\n dZeroPoint_actual = zero_point_curr.to(device).grad.detach()\n tolerance = 1e-4\n\n self.assertTrue(\n torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),\n \"Expected dX={} to match X.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dX_expected, dX_actual, X_curr, scale_curr, zero_point_curr, dout, n_bits))\n self.assertTrue(\n torch.allclose(dScale_expected * grad_factor, dScale_actual, rtol=tolerance, atol=tolerance),\n \"Expected dScale={} to match scale.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dScale_expected * grad_factor, dScale_actual,\n X_curr, scale_curr, zero_point_curr, dout, n_bits))\n self.assertTrue(\n torch.allclose(dZeroPoint_expected * grad_factor, dZeroPoint_actual, rtol=tolerance, atol=tolerance),\n \"Expected dZeroPoint={} to match zero_point.grad={}, X={}, s={}, z={}, dout={}, n_bits={}\".format(\n dZeroPoint_expected * grad_factor, dZeroPoint_actual,\n X_curr, scale_curr, zero_point_curr, dout, n_bits))\n X_curr.grad.data.zero_()\n scale_curr.grad.data.zero_()\n zero_point_curr.grad.data.zero_()\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n def test_learnable_backward_per_channel_cpu(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (_, _, axis, _) = X\n X_base = torch.tensor(X).to('cpu')\n channel_size = X_base.size(axis)\n scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)\n zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))\n self._test_learnable_backward_per_channel(\n X_base, 'cpu', scale_base, zero_point_base, axis)\n\n @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.quint8)))\n @unittest.skipIf(not TEST_CUDA, \"No gpu is not available.\")\n def test_learnable_backward_per_channel_cuda(self, X):\n torch.random.manual_seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n X_base = torch.tensor(X).to('cuda')\n scale_base = to_tensor(scale, 'cuda')\n zero_point_base = to_tensor(zero_point, 'cuda')\n self._test_learnable_backward_per_channel(\n X_base, 'cuda', scale_base, zero_point_base, axis)\n\n def test_numerical_consistency_per_tensor(self):\n self._test_numerical_consistency('per_tensor')\n\n def test_numerical_consistency_per_channel(self):\n self._test_numerical_consistency('per_channel')\n\n def _test_numerical_consistency(self, test_type):\n r\"\"\"Comparing numerical consistency 
between quantize/dequantize op and the fake quantize op across devices and dtypes\n \"\"\"\n torch.random.manual_seed(NP_RANDOM_SEED)\n torch_types = [torch.qint8, torch.quint8]\n float_types = [torch.float, torch.float16, torch.float64]\n zero_types = [torch.int]\n devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]\n axis = 1\n for i in range(20):\n for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):\n X = torch.randn(3, 3, device=device).to(float_type)\n scales = (10 * torch.randn(3, device=device)).abs()\n scale = scales.mean().to(float).item()\n zeros = (10 * torch.randn(3, device=device)).abs().to(dtype=zero_type)\n zero = zeros.max().view(1).item()\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n test_was_run = False\n if test_type == \"per_tensor\":\n test_was_run = True\n Y = torch.dequantize(torch.quantize_per_tensor(X.to('cpu').to(torch.float),\n scale, zero, torch_type)).to(device).to(float_type)\n Y_prime = torch.fake_quantize_per_tensor_affine(X, scale, zero, quant_min, quant_max)\n self.assertEqual(\n Y, Y_prime, \"Difference found between dequant+quant_per_tensor and fake_quantize_per_tensor\")\n\n if test_type == \"per_channel\":\n test_was_run = True\n Y = torch.dequantize(torch.quantize_per_channel(X.to('cpu').to(torch.float), scales.to(\n 'cpu'), zeros.to('cpu'), axis, torch_type)).to(device).to(float_type)\n Y_prime = torch.fake_quantize_per_channel_affine(X, scales, zeros, axis, quant_min, quant_max)\n self.assertEqual(\n Y, Y_prime, \"Difference found between dequant+quant_per_channel and fake_quantize_per_channel\")\n self.assertTrue(test_was_run)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_quantization.py TESTNAME\\n\\n\"\n \"instead.\")\n",
"from functools import reduce\nimport torch\nimport operator\nfrom torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise\nfrom typing import Callable, Dict\nfrom torch.fx.node import Target, Node\nfrom torch.nn.modules.batchnorm import BatchNorm2d\nfrom torch.nn.modules.conv import Conv2d\n\n\n_INFERENCE_RULES: Dict[Target, Callable] = {}\n\n\ndef expand_to_tensor_dim(t, n):\n \"\"\"\n Expand a type to the desired tensor dimension if possible\n Raise an error otherwise.\n - t is the given type\n - n is a number to expand to\n \"\"\"\n if t == Dyn:\n dims = [Dyn] * n\n return TensorType(tuple(dims))\n elif isinstance(t, TensorType):\n if len(t.__args__) != n:\n raise TypeError(f'Cannot extend tensor dimension. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')\n return t\n else:\n raise TypeError(f'Cannot match the type {t}')\n\n\ndef broadcast_types(t1, t2):\n if t1 == Dyn or t2 == Dyn:\n return t1, t2\n\n if isinstance(t1, TensorType) and isinstance(t2, TensorType):\n s1 = len(t1.__args__)\n s2 = len(t2.__args__)\n\n new_t1 = list(t1.__args__)\n new_t2 = list(t2.__args__)\n\n if abs(s1 - s2) > 1 or s1 == 0 or s2 == 0:\n raise TypeError(f'Cannot broadcast the tensors {t1} and {t2}')\n\n if s1 > s2:\n new_t2.insert(0, t1.__args__[0])\n\n elif s2 > s1:\n new_t1.insert(0, t2.__args__[0])\n\n for i, (x, y) in enumerate(zip(new_t1, new_t2)):\n if x == 1:\n new_t1[i] = y\n elif y == 1:\n new_t2[i] = x\n else:\n continue\n\n if tuple(new_t1) != t1.__args__ and tuple(new_t2) != t2.__args__:\n raise TypeError('In-place operations cannot not change shape')\n\n return TensorType(tuple(new_t1)), TensorType(tuple(new_t2))\n else:\n raise TypeError(f'Cannot broadcast types {t1} and {t2}')\n\ndef register_inference_rule(call_target):\n def register(fn):\n if call_target in _INFERENCE_RULES:\n raise RuntimeError('Inference rule already registered for {call_target}!')\n _INFERENCE_RULES[call_target] = fn\n return fn\n return register\n\n\n@register_inference_rule(torch.add)\n@register_inference_rule(operator.add)\ndef add_inference_rule(n: Node):\n assert isinstance(n.args[0], Node)\n assert isinstance(n.args[1], Node)\n t1 = n.args[0].type\n t2 = n.args[1].type\n\n # handle scalar addition\n if t1 == int and isinstance(t2, TensorType):\n n.type = t2\n return n.type\n\n elif t2 == int and isinstance(t1, TensorType):\n n.type = t1\n return n.type\n\n (new_t1, new_t2) = broadcast_types(t1, t2)\n n.args[0].type = new_t1\n n.args[1].type = new_t2\n\n if is_consistent(new_t1, new_t2):\n # we return the more precise type\n if is_more_precise(new_t1, new_t2):\n n.type = new_t2\n else:\n n.type = new_t1\n return n.type\n else:\n raise TypeError(f'Cannot add arguments {n.args[0]} ({ n.args[0].type}) and {n.args[1]} ({ n.args[1].type}) in node {n}.'\n f' Types should match ')\n\n\n@register_inference_rule(torch.transpose)\ndef transpose_inference_rule(n: Node):\n if n.target == torch.transpose:\n assert isinstance(n.args[0], Node)\n t = n.args[0].type\n\n assert isinstance(n.args[1], int)\n assert isinstance(n.args[2], int)\n dim1, dim2 = n.args[1], n.args[2]\n\n if t == Dyn:\n n.type = Dyn\n return n.type\n\n elif isinstance(t, TensorType):\n\n if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):\n new_type = list(t.__args__)\n new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]\n final = TensorType(new_type)\n n.type = final\n return n.type\n else:\n raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')\n else:\n raise 
TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')\n\n\n@register_inference_rule(torch.reshape)\ndef reshape_inference_rule(n: Node):\n assert isinstance(n.args[0], Node)\n t1 = n.args[0].type\n\n assert isinstance(n.args[1], list)\n t2 = n.args[1]\n t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])\n\n # if we do not know the original tensor dimension,\n # we return the required dimension\n if t1 == Dyn:\n n.type = t2_type\n return t2_type\n\n # if any of the dimensions are unknown,\n # we check for divisibility\n elif isinstance(t1, TensorType) and Dyn in t1.__args__ or -1 in t2:\n assert isinstance(t1, TensorType)\n a = [e if e != Dyn else 1 for e in t1.__args__]\n p1 = reduce(lambda x, y: x * y, a)\n p2 = reduce(lambda x, y: x * y, t2)\n if p1 % p2 == 0 or p2 % p1 == 0:\n n.type = t2_type\n return t2_type\n else:\n raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')\n\n # if all dimensions are known we check the products\n elif isinstance(t1, TensorType):\n p1 = reduce(lambda x, y: x * y, t1.__args__)\n p2 = reduce(lambda x, y: x * y, t2)\n if p1 == p2:\n n.type = t2_type\n return t2_type\n else:\n raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')\n\n else:\n raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')\n\n@register_inference_rule(BatchNorm2d)\ndef bn2d_inference_rule(n: Node, module_instance):\n \"\"\"\n Given a BatchNorm2D instance and a node check the following conditions:\n - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)\n - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')\n - t is consistent with t'\n - x_2 is consistent with the module's num_features\n - x_2' is consistent with the module's num_features\n output type: the more precise type of t and t'\n \"\"\"\n assert isinstance(n.args[0], Node)\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)\n arg_type = n.args[0].type\n n.type = expand_to_tensor_dim(n.type, 4)\n\n # we check the conditions on the incoming argument\n # and any existing annotation\n # we also check for consistency between both annotations\n if is_consistent(arg_type.__args__[1], module_instance.num_features) and \\\n is_consistent(n.type.__args__[1], module_instance.num_features) and \\\n is_consistent(arg_type, n.type):\n\n # we choose the more precise type\n # to be the node type\n # so if an incoming argument has more type information\n # we set this node's type to be the argument type\n n.type = get_greatest_upper_bound(arg_type, n.type)\n return n.type\n else:\n raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')\n\n\ndef calculate(d_in, module_instance, index):\n \"\"\"\n For calculating h_in and w_out.\n \"\"\"\n\n padding = (module_instance.padding, module_instance.padding) \\\n if isinstance(module_instance.padding, int) else module_instance.padding\n kernel_size = (module_instance.kernel_size, module_instance.kernel_size)\\\n if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size\n stride = (module_instance.stride, module_instance.stride) \\\n if isinstance(module_instance.stride, int) else module_instance.stride\n dilation = (module_instance.dilation, module_instance.dilation)\\\n if isinstance(module_instance.dilation, int) else module_instance.dilation\n\n if d_in == Dyn:\n return Dyn\n\n elif isinstance(d_in, int):\n n = d_in + 2 * padding[index] - \\\n dilation[index] * \\\n 
(kernel_size[index] - 1) - 1\n\n return (n // stride[0]) + 1\n\n else:\n raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn')\n\n\ndef get_greatest_upper_bound(type1, type2):\n \"\"\"\n Get the most precise type that's consistent with the given types\n \"\"\"\n if type1 == Dyn:\n return type2\n elif type2 == Dyn:\n return type1\n elif isinstance(type1, TensorType) and isinstance(type2, TensorType):\n if not is_consistent(type1, type2):\n raise TypeError(f'Inconsistent types {type1}, {type2}')\n gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)]\n return TensorType(tuple(gub))\n\n\n@register_inference_rule(Conv2d)\ndef conv2d_inference_rule(n: Node, module_instance):\n \"\"\"\n Given a Conv2D instance and a node check the following conditions:\n - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W)\n - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')\n - x_2 is consistent with the module's in_channels\n - let o = (x_1, out_channels, H_out, W_out)\n then the output is the greatest upper bound of o and the existing node type t'.\n \"\"\"\n assert isinstance(n.args[0], Node)\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)\n arg_type = n.args[0].type\n curr_node_type = expand_to_tensor_dim(n.type, 4)\n\n if is_consistent(arg_type.__args__[1], module_instance.in_channels):\n w_in = arg_type.__args__[3]\n h_in = arg_type.__args__[2]\n h_out = calculate(h_in, module_instance, 0)\n w_out = calculate(w_in, module_instance, 1)\n new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))\n gub = get_greatest_upper_bound(new_type, curr_node_type)\n n.type = gub\n\n return n.type\n else:\n raise TypeError(f'Cannot apply {module_instance} with input type { arg_type} and existing type {n.type} on {n}')\n\n\n@register_inference_rule(torch.nn.ReLU)\ndef relu_inference_rule(n: Node, module_instance):\n \"\"\"\n Input and output shapes should be equal.\n \"\"\"\n assert isinstance(n.args[0], Node)\n\n if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n\n if isinstance(n.args[0].type, TensorType):\n n.type = get_greatest_upper_bound(n.args[0].type, n.type)\n return n.type\n\n\ndef maxpool2d_check(typ, module_instance):\n new_type_list = list(typ.__args__)\n if len(new_type_list) == 4 or len(new_type_list) == 3:\n w_in = new_type_list[-1]\n h_in = new_type_list[-2]\n h_out = calculate(h_in, module_instance, 0)\n w_out = calculate(w_in, module_instance, 1)\n new_type_list[-1] = w_out\n new_type_list[-2] = h_out\n return TensorType(tuple(new_type_list))\n\n else:\n raise TypeError(f'Wrong size {typ} for {module_instance}')\n\n\n@register_inference_rule(torch.nn.MaxPool2d)\ndef maxpool2d_inference_rule(n: Node, module_instance):\n \"\"\"\n Given a MaxPool2D instance and a node check the following conditions:\n - Input size matches size 3 or 4\n - Current node type is consistent with the output type we will calculate\n - Input size matches output size and the last two dimensions of the output\n are w_out and h_out. 
The remaining dimensions are the same as the input\n - Our final result is the greatest upper bound of the output we calculate\n and the current node type.\n \"\"\"\n assert isinstance(n.args[0], Node)\n\n if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n if isinstance(n.args[0].type, TensorType):\n output = maxpool2d_check(n.args[0].type, module_instance)\n n.type = get_greatest_upper_bound(output, n.type)\n return n.type\n\n\n\ndef linear_check(tensor_type, module_instance):\n \"\"\"\n Checks that an input tensor type satisfies the conditions for linear operation\n and returns the output type based on in and out features given by module_instance\n \"\"\"\n if len(tensor_type.__args__) >= 2:\n if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):\n # Todo backwards propagation\n new_type_args = list(tensor_type.__args__)\n new_type_args[-1] = module_instance.out_features\n return TensorType(tuple(new_type_args))\n else:\n raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')\n else:\n raise TypeError(f'Type {tensor_type} must have rank 2 or more.')\n\n\n@register_inference_rule(torch.nn.Linear)\ndef linear_inference_rule(n: Node, module_instance):\n assert isinstance(n.args[0], Node)\n if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n if isinstance(n.args[0].type, TensorType):\n output_type = linear_check(n.args[0].type, module_instance)\n n.type = get_greatest_upper_bound(output_type, n.type)\n return n.type\n\n\n\ndef adaptiveavgpool2d_check(tensor_type, module_instance):\n output_size = module_instance.output_size\n if isinstance(output_size, int):\n output_size = [output_size, output_size]\n elif isinstance(output_size, tuple):\n output_size = list(output_size)\n if output_size[0] is None:\n output_size[0] = output_size[1]\n if output_size[1] is None:\n output_size[1] = output_size[0]\n\n new_type_list = list(tensor_type.__args__)\n\n if len(tensor_type.__args__) == 4 or len(tensor_type.__args__) == 3:\n new_type_list[-1] = output_size[1]\n new_type_list[-2] = output_size[0]\n\n return TensorType(tuple(new_type_list))\n\n else:\n raise TypeError(f'Tensor ranks must be 3 or 4. 
Got {tensor_type}')\n\n@register_inference_rule(torch.nn.AdaptiveAvgPool2d)\ndef adaptiveavgpool2d_inference_rule(n: Node, module_instance):\n \"\"\"\n The input and output sizes should be the same except for the last\n two dimensions taken from the input, which represent width and height\n \"\"\"\n assert isinstance(n.args[0], Node)\n if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n if isinstance(n.args[0].type, TensorType):\n output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)\n n.type = get_greatest_upper_bound(n.type, output_type)\n return n.type\n\n\nclass GraphTypeChecker:\n def __init__(self, env, traced):\n self.env = env\n self.traced = traced\n\n def type_check(self):\n \"\"\"\n A gradual type checker for graphs\n Effect: every node's field type will be\n populated with a type after type-checking is done\n \"\"\"\n graph = self.traced.graph\n\n # type check every node with gradual type rules\n # if any node does not type check return false\n for n in graph.nodes:\n self.type_check_node(n)\n return True\n\n def type_check_node(self, n: Node):\n \"\"\"\n Type check a given fx node.\n Current operations:\n - Reshape\n - Transpose\n - Add\n \"\"\"\n if n.type is None:\n n.type = Dyn\n\n if n.op == 'placeholder':\n return n.type\n\n if n.op == 'call_function':\n if n.target in _INFERENCE_RULES:\n return _INFERENCE_RULES[n.target](n)\n else:\n raise RuntimeError(f'No inference rule registered for target {n.target}!')\n\n if n.op == 'call_module':\n module_instance = getattr(self.traced, str(n.target))\n if type(module_instance) in _INFERENCE_RULES:\n return _INFERENCE_RULES[type(module_instance)](n, module_instance)\n else:\n raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')\n\n if n.op == 'output':\n assert isinstance(n.args[0], Node)\n n.type = n.args[0].type\n return n.type\n\n else:\n raise NotImplementedError(\"Method not yet implemented\")\n"
] |
[
[
"torch.quantization.MinMaxObserver",
"torch.jit.load",
"torch._fake_quantize_learnable_per_tensor_affine",
"torch.rand_like",
"torch.load",
"torch.zeros",
"torch.iinfo",
"torch.cuda.amp.autocast",
"torch.quantization.default_affine_fixed_qparams_fake_quant",
"torch.quantization._learnable_fake_quantize._LearnableFakeQuantize.with_args",
"torch.quantization.disable_observer",
"torch.cuda.is_available",
"torch.device",
"torch.allclose",
"torch.save",
"torch.jit.script",
"torch.ones",
"torch.quantization.prepare_qat",
"torch.randn",
"torch.round",
"torch.fake_quantize_per_tensor_affine",
"torch.tensor",
"torch.rand",
"torch.quantization.disable_fake_quant",
"torch.normal",
"torch.quantization.enable_observer",
"torch.testing._internal.hypothesis_utils.qparams",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.testing._internal.hypothesis_utils.array_shapes",
"torch.quantization.enable_fake_quant",
"torch.nn.Linear",
"torch.testing._internal.common_quantized.to_tensor",
"torch.testing._internal.hypothesis_utils.assert_deadline_disabled",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_grad_reference",
"torch.fake_quantize_per_channel_affine",
"torch.quantization.get_default_qat_qconfig",
"torch.jit.save",
"numpy.random.seed",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_reference",
"torch.quantization.default_fake_quant",
"torch.manual_seed",
"torch.random.manual_seed",
"torch.testing._internal.hypothesis_utils.floats",
"torch.quantization.PerChannelMinMaxObserver",
"torch.unbind",
"torch._fake_quantize_learnable_per_channel_affine"
],
[
"torch.fx.tensor_type.is_more_precise",
"torch.fx.tensor_type.is_consistent",
"torch.fx.tensor_type.TensorType"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
metabolize/entente
|
[
"c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a"
] |
[
"entente/test_restore_correspondence.py"
] |
[
"from entente.restore_correspondence import find_correspondence, restore_correspondence\nimport numpy as np\nimport pytest\nfrom .restore_correspondence import _maybe_tqdm\n\n\ndef create_truncated_test_mesh():\n from .testing import vitra_mesh\n\n # For performance.\n return vitra_mesh().picking_vertices(np.arange(1000))\n\n\ndef test_helper():\n assert [x for x in _maybe_tqdm(iter([1, 2, 3]), progress=True)] == [1, 2, 3]\n assert [x for x in _maybe_tqdm(iter([1, 2, 3]), progress=False)] == [1, 2, 3]\n\n\ndef test_find_correspondence_matched():\n b = create_truncated_test_mesh().v\n expected_correspondence = np.random.permutation(len(b))\n a = b[expected_correspondence]\n\n correspondence = find_correspondence(a, b, progress=False)\n\n np.testing.assert_array_equal(correspondence, expected_correspondence)\n np.testing.assert_array_equal(b[correspondence], a)\n\n\ndef test_find_correspondence_unmatched():\n b = create_truncated_test_mesh().v\n expected_correspondence = np.random.permutation(len(b))\n a = b[expected_correspondence]\n\n a = np.vstack([a, np.array([1.0, 2.0, 3.0])])\n\n with pytest.raises(ValueError):\n find_correspondence(a, b, progress=False)\n\n expected_correspondence = np.append(1 + expected_correspondence, np.array([-1]))\n b = np.vstack([np.array([3.0, 2.0, 1.0]), b])\n expected_unmatched_b = np.array([0])\n\n with pytest.raises(ValueError):\n find_correspondence(a, b, progress=False)\n\n correspondence, unmatched_b = find_correspondence(\n a, b, all_must_match=False, ret_unmatched_b=True, progress=False\n )\n\n np.testing.assert_array_equal(correspondence, expected_correspondence)\n np.testing.assert_array_equal(unmatched_b, expected_unmatched_b)\n reconstructed_a = np.vstack(\n [b[correspondence[np.where(correspondence != -1)]], np.array([1.0, 2.0, 3.0])]\n )\n np.testing.assert_array_equal(reconstructed_a, a)\n\n\ndef test_restore_correspondence():\n from .shuffle import shuffle_vertices\n\n test_mesh = create_truncated_test_mesh()\n shuffled, ordering = shuffle_vertices(test_mesh, ret_new_ordering=True)\n\n restored, v_old_to_new = restore_correspondence(shuffled, test_mesh, progress=False)\n\n np.testing.assert_array_equal(restored.v, test_mesh.v)\n np.testing.assert_array_equal(restored.f, test_mesh.f)\n np.testing.assert_array_equal(v_old_to_new, ordering)\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VCG/gp
|
[
"a41d0c52fd09b5e34804b9c6082778a75dfc03c1"
] |
[
"raveler/ray/ray/features/moments.py"
] |
[
"import numpy as np\nfrom scipy.misc import comb as nchoosek\nfrom . import base\n\nclass Manager(base.Null):\n def __init__(self, nmoments=4, use_diff_features=True, oriented=False, \n normalize=False, *args, **kwargs):\n super(Manager, self).__init__()\n self.nmoments = nmoments\n self.use_diff_features = use_diff_features\n self.oriented = oriented\n self.normalize = normalize\n\n @classmethod\n def load_dict(cls, fm_info):\n obj = cls(fm_info['nmoments'], fm_info['use_diff'],\n fm_info['oriented'], fm_info['normalize'])\n return obj\n\n def write_fm(self, json_fm={}):\n if 'feature_list' not in json_fm:\n json_fm['feature_list'] = []\n json_fm['feature_list'].append('moments')\n json_fm['moments'] = {\n 'nmoments' : self.nmoments,\n 'use_diff' : self.use_diff_features,\n 'oriented' : self.oriented,\n 'normalize' : self.normalize\n }\n return json_fm\n\n def compute_moment_sums(self, ar, idxs):\n values = ar[idxs][...,np.newaxis]\n return (values ** np.arange(self.nmoments+1)).sum(axis=0).T\n\n def create_node_cache(self, g, n):\n node_idxs = list(g.node[n]['extent'])\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n return self.compute_moment_sums(ar, node_idxs)\n\n def create_edge_cache(self, g, n1, n2):\n edge_idxs = list(g[n1][n2]['boundary'])\n if self.oriented:\n ar = g.oriented_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n return self.compute_moment_sums(ar, edge_idxs)\n\n def update_node_cache(self, g, n1, n2, dst, src):\n dst += src\n\n def update_edge_cache(self, g, e1, e2, dst, src):\n dst += src\n\n def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):\n if len(idxs) == 0: return\n a = -1.0 if remove else 1.0\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n dst += a * self.compute_moment_sums(ar, idxs)\n\n def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):\n if len(idxs) == 0: return\n a = -1.0 if remove else 1.0\n if self.oriented:\n ar = g.max_probabilities_r\n else:\n ar = g.non_oriented_probabilities_r\n dst += a * self.compute_moment_sums(ar, idxs)\n\n def compute_node_features(self, g, n, cache=None):\n if cache is None: \n cache = g.node[n][self.default_cache]\n feat = central_moments_from_noncentral_sums(cache)\n if self.normalize:\n feat = ith_root(feat)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\n def compute_edge_features(self, g, n1, n2, cache=None):\n if cache is None: \n cache = g[n1][n2][self.default_cache]\n feat = central_moments_from_noncentral_sums(cache)\n if self.normalize:\n feat = ith_root(feat)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\n def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None,\n nthroot=False):\n if not self.use_diff_features:\n return np.array([])\n if cache1 is None:\n cache1 = g.node[n1][self.default_cache]\n m1 = central_moments_from_noncentral_sums(cache1)\n\n if cache2 is None:\n cache2 = g.node[n2][self.default_cache]\n m2 = central_moments_from_noncentral_sums(cache2)\n \n if nthroot or self.normalize:\n m1, m2 = map(ith_root, [m1, m2])\n feat = abs(m1-m2)\n n = feat.ravel()[0]\n return np.concatenate(([n], feat[1:].T.ravel()))\n\ndef central_moments_from_noncentral_sums(a):\n \"\"\"Compute moments about the mean from sums of x**i, for i=0, ..., len(a).\n\n The first two moments about the mean (1 and 0) would always be \n uninteresting so the function returns n (the sample size) and mu (the \n 
sample mean) in their place.\n \"\"\"\n a = a.astype(np.double)\n if len(a) == 1:\n return a\n N = a.copy()[0]\n a /= N\n mu = a.copy()[1]\n ac = np.zeros_like(a)\n for n in range(2,len(a)):\n js = np.arange(n+1)\n if a.ndim > 1: js = js[:,np.newaxis]\n # Formula found in Wikipedia page for \"Central moment\", 2011-07-31\n ac[n] = (nchoosek(n,js) * \n (-1)**(n-js) * a[js.ravel()] * mu**(n-js)).sum(axis=0)\n ac[0] = N\n ac[1] = mu\n return ac\n\ndef ith_root(ar):\n \"\"\"Get the ith root of the array values at ar[i] for i > 1.\"\"\"\n if len(ar) < 2:\n return ar\n ar = ar.copy()\n ar[2:] = np.sign(ar[2:]) * \\\n (abs(ar[2:]) ** (1.0/np.arange(2, len(ar)))[:, np.newaxis])\n return ar\n\n"
] |
[
[
"numpy.arange",
"numpy.sign",
"numpy.zeros_like",
"numpy.array",
"scipy.misc.comb"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"1.2",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
AndrewQuinn2020/EECS-332-MPs
|
[
"ee164e98bd6b1b05296e4abec69a8b5d5de2581b"
] |
[
"MP3/mp3_testgen.py"
] |
[
"#!/usr/bin/python3\n\n# Anything not directly related to processing here\nimport sys\nfrom math import floor\nfrom pathlib import Path\nfrom random import randint\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mp3_helper import *\nfrom PIL import Image\n\nnp.set_printoptions(threshold=sys.maxsize)\nnp.set_printoptions(linewidth=1000)\n\n\nimage_dimensions = (5, 5)\n\n\nif __name__ == \"__main__\":\n hello()\n print(\"\\n\\nThis is the test image generator for MP #3.\")\n print(\"We are going to generate a bunch of small bitmaps with colors\")\n print(\"differing by small values.\")\n print('\\n\\nThe name \"test_image_4_xxx.bmp\" indicates 4 different')\n print(\"shades you probably can't see; once you histogram EQ it they should\")\n print(\"fill all or almost all of the whole spectrum. For example,\")\n print(\"a properly histogram EQ'd test_image_2_000.bmp should be pure\")\n print(\"black and pure white.\")\n\n for x in range(2, 5):\n for i in range(0, 5):\n new_bmp = np.random.choice(\n a=list(range(0, x)), size=image_dimensions\n ).astype(np.uint8)\n new_bmp = new_bmp + randint(0, 256 - x)\n print(new_bmp)\n im = Image.fromarray(new_bmp, \"L\")\n\n file_index = str(i).zfill(3)\n im.save(\n os.path.join(\n test_images_dir, \"test_image_{}_{}.bmp\".format(x, file_index)\n )\n )\n"
] |
[
[
"numpy.set_printoptions"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stanford-futuredata/Willump-Simple
|
[
"56d52074b671e07a364744e8195fcdc91926c3a8"
] |
[
"tests/benchmark_scripts/product_eval.py"
] |
[
"import argparse\nimport pickle\nimport time\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom product_utils import *\nfrom willump.evaluation.willump_executor import willump_execute\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", \"--cascades\", action=\"store_true\", help=\"Cascade threshold\")\nargs = parser.parse_args()\nif args.cascades:\n cascades_dict = pickle.load(open(base_directory + \"lazada_training_cascades.pk\", \"rb\"))\nelse:\n cascades_dict = None\n\n\n@willump_execute(predict_function=product_predict,\n confidence_function=product_confidence,\n predict_cascades_params=cascades_dict)\ndef product_eval_pipeline(input_x, model, title_vect, color_vect, brand_vect):\n title_result = transform_data(input_x, title_vect)\n color_result = transform_data(input_x, color_vect)\n brand_result = transform_data(input_x, brand_vect)\n return product_predict(model, [title_result, color_result, brand_result])\n\n\nif __name__ == '__main__':\n df = pd.read_csv(base_directory + \"lazada_data_train.csv\", header=None,\n names=['country', 'sku_id', 'title', 'category_lvl_1', 'category_lvl_2', 'category_lvl_3',\n 'short_description', 'price', 'product_type'])\n y = np.loadtxt(base_directory + \"conciseness_train.labels\", dtype=int)\n _, test_df, _, test_y = train_test_split(df, y, test_size=0.2, random_state=42)\n title_vectorizer, color_vectorizer, brand_vectorizer = pickle.load(\n open(base_directory + \"lazada_vectorizers.pk\", \"rb\"))\n model = pickle.load(open(base_directory + \"lazada_model.pk\", \"rb\"))\n\n product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n\n start_time = time.time()\n preds = product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)\n time_elapsed = time.time() - start_time\n\n print(\"Elapsed Time %fs Num Rows %d Throughput %f rows/sec\" %\n (time_elapsed, len(test_df), len(test_df) / time_elapsed))\n\n print(\"1 - RMSE Score: %f\" % product_score(preds, test_y))\n"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Sayar1106/OTTPlatformRecommender
|
[
"85b72dfe9f810e3b6e12f8c7702ef94db3a03190"
] |
[
"sample_model/inference.py"
] |
[
"import joblib\nimport pickle\nimport os\nimport config\nimport pandas as pd\nimport click\n\n\ndef load_model_helper(file_path):\n if os.path.split(\".\")[-1] == \"pickle\":\n return pickle.load(open(file_path, 'wb'))\n \n return joblib.load(file_path)\n\ndef fetch_artist_columns(df, artist_list):\n return [artist for artist in df[\"artists\"].to_list() for a in artist_list if a in artist]\n\n\nclass SpotifyRecommender:\n def __init__(self, model):\n self.model = model\n \n def _predict(self, arr, k=20):\n return self.model.kneighbors(arr, \n n_neighbors=k, \n return_distance=False)\n \n def create_playlist(self, arr):\n predictions = self._predict(arr)\n lookup_table = pd.read_csv(config.LOOKUP_TABLE)\n artist_list = lookup_table.iloc[predictions[0][1:], 1].to_list()\n master_table = pd.read_csv(config.MASTER_TABLE, usecols=[\"artists\", \"name\", \"popularity\"])\n\n songs = master_table[master_table[\"artists\"].isin(fetch_artist_columns(master_table, artist_list))]\n songs = songs.drop_duplicates(subset=[\"name\"], keep=\"first\")\n\n return [*songs[[\"artists\", \"name\"]].sample(n=30).itertuples(name=\"Songs\", index=False)]\n\n\[email protected]()\[email protected](\"--artist_name\", type=str, help=\"Enter the artist name.\")\ndef main(artist_name):\n model = load_model_helper(config.MODEL_OUTPUT)\n spotify_recommender = SpotifyRecommender(model)\n df = pd.read_csv(config.MODEL_INPUT, usecols=[\"artists\", \"acousticness\", \"danceability\", \n \"energy\", \"instrumentalness\", \n \"liveness\", \"loudness\", \"speechiness\", \n \"tempo\", \"valence\", \"popularity\"])\n arr = df[df[\"artists\"].isin([artist_name])].values[:,1:]\n \n playlist = spotify_recommender.create_playlist(arr)\n print(playlist)\n \n\nif __name__ == \"__main__\":\n main()\n\n \n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhengcj1/ChID-Dataset
|
[
"f7d9b7b75cccd50455987a623c898b490e8450f6"
] |
[
"Competition/RNN-based Baseline/Models/SAR.py"
] |
[
"import tensorflow as tf\nfrom Models.BasicModel import BasicModel\n\nclass Model(BasicModel):\n def __init__(self,\n learning_rate,\n init_word_embed,\n init_idiom_embed,\n size_embed=200,\n num_units=100, # make sure that num_units = size_embed / 2\n max_gradient_norm=5.0):\n\n assert size_embed == 2 * num_units\n\n super(Model, self).__init__()\n super(Model, self)._create_embedding(init_word_embed, init_idiom_embed)\n\n doc_embedding = tf.cond(self.is_train,\n lambda: tf.nn.dropout(tf.nn.embedding_lookup(self.word_embed_matrix, self.document), 0.5),\n lambda: tf.nn.embedding_lookup(self.word_embed_matrix, self.document))\n # [batch, length, size_embed]\n can_embedding = tf.nn.embedding_lookup(self.idiom_embed_matrix, self.candidates) # [batch, 10, size_embed]\n\n with tf.variable_scope(\"doc\"):\n cell_fw_doc = tf.nn.rnn_cell.LSTMCell(num_units, initializer=tf.orthogonal_initializer())\n cell_bw_doc = tf.nn.rnn_cell.LSTMCell(num_units, initializer=tf.orthogonal_initializer())\n h_doc, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw_doc, cell_bw_doc, doc_embedding, self.doc_length,\n dtype=tf.float32, scope=\"bi_lstm\")\n state_doc = tf.concat(h_doc, 2) # [batch, length, 2 * num_units]\n\n blanks_states = tf.matmul(self.locations, state_doc) # query, [batch, labels, 2 * num_units]\n bilinear_attention = tf.get_variable(\"bilinear_attention\", [2 * num_units, 2 * num_units], tf.float32)\n attention_matrix = tf.matmul(tf.einsum(\"abc,cd->abd\", blanks_states, bilinear_attention), # [batch, labels, 2 * num_units]\n tf.transpose(state_doc, [0, 2, 1])) # [batch, 2 * num_units, length]\n tmp = tf.exp(attention_matrix) * tf.tile(tf.expand_dims(self.mask, axis=1), [1, tf.shape(blanks_states)[1], 1])\n attention = tf.div(tmp, tf.reduce_sum(tmp, axis=-1, keep_dims=True))\n #attention = tf.nn.softmax(attention_matrix) # [batch, labels, length]\n state_attention = tf.matmul(attention, state_doc) # [batch, labels, 2 * num_units]\n\n match_matrix = tf.matmul(state_attention, tf.transpose(can_embedding, [0, 2, 1])) # [batch, labels, 10]\n self.logits = tf.nn.softmax(match_matrix)\n\n super(Model, self)._create_loss()\n super(Model, self)._create_train_step(learning_rate, max_gradient_norm)"
] |
[
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.exp",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.einsum",
"tensorflow.expand_dims",
"tensorflow.orthogonal_initializer",
"tensorflow.variable_scope",
"tensorflow.nn.embedding_lookup"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
atulvpweb/Screeni-py
|
[
"2a0b995ce134fb55977fa2ab38274a72392921fc"
] |
[
"src/classes/Screener.py"
] |
[
"'''\n * Project : Screenipy\n * Author : Pranjal Joshi\n * Created : 28/04/2021\n * Description : Class for analyzing and validating stocks\n'''\n\nimport sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport talib\nimport classes.ConfigManager as ConfigManager\nfrom scipy.signal import argrelextrema\nfrom classes.ColorText import colorText\nfrom classes.SuppressOutput import SuppressOutput\n\n# Exception for newly listed stocks with candle nos < daysToLookback\nclass StockDataNotAdequate(Exception):\n pass\n\n# This Class contains methods for stock analysis and screening validation\nclass tools:\n\n # Private method to find candle type\n # True = Bullish, False = Bearish\n def getCandleType(dailyData):\n if dailyData['Close'][0] >= dailyData['Open'][0]:\n return True\n else:\n return False\n\n # Preprocess the acquired data\n def preprocessData(data, daysToLookback=ConfigManager.daysToLookback):\n if ConfigManager.useEMA:\n sma = talib.EMA(data['Close'],timeperiod=50)\n lma = talib.EMA(data['Close'],timeperiod=200)\n data.insert(6,'SMA',sma)\n data.insert(7,'LMA',lma)\n else:\n sma = data.rolling(window=50).mean()\n lma = data.rolling(window=200).mean()\n data.insert(6,'SMA',sma['Close'])\n data.insert(7,'LMA',lma['Close'])\n vol = data.rolling(window=20).mean()\n rsi = talib.RSI(data['Close'], timeperiod=14)\n data.insert(8,'VolMA',vol['Volume'])\n data.insert(9,'RSI',rsi)\n data = data[::-1] # Reverse the dataframe\n # data = data.fillna(0)\n # data = data.replace([np.inf, -np.inf], 0)\n fullData = data\n trimmedData = data.head(daysToLookback)\n return (fullData, trimmedData)\n\n # Validate LTP within limits\n def validateLTP(data, dict, saveDict, minLTP=ConfigManager.minLTP, maxLTP=ConfigManager.maxLTP):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n ltp = round(recent['Close'][0],2)\n saveDict['LTP'] = str(ltp)\n verifyStageTwo = True\n if(ConfigManager.stageTwo):\n yearlyLow = data.head(300).min()['Close']\n yearlyHigh = data.head(300).max()['Close']\n if ltp < (2 * yearlyLow) or ltp < (0.75 * yearlyHigh):\n verifyStageTwo = False\n if(ltp >= minLTP and ltp <= maxLTP and verifyStageTwo):\n dict['LTP'] = colorText.GREEN + (\"%.2f\" % ltp) + colorText.END\n return True\n else:\n dict['LTP'] = colorText.FAIL + (\"%.2f\" % ltp) + colorText.END\n return False\n\n # Validate if share prices are consolidating\n def validateConsolidation(data, dict, saveDict, percentage=10):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n hc = data.describe()['Close']['max']\n lc = data.describe()['Close']['min']\n if ((hc - lc) <= (hc*percentage/100) and (hc - lc != 0)):\n dict['Consolidating'] = colorText.BOLD + colorText.GREEN + \"Range = \" + str(round((abs((hc-lc)/hc)*100),2))+\"%\" + colorText.END\n else:\n dict['Consolidating'] = colorText.BOLD + colorText.FAIL + \"Range = \" + str(round((abs((hc-lc)/hc)*100),2)) + \"%\" + colorText.END\n saveDict['Consolidating'] = str(round((abs((hc-lc)/hc)*100),2))+\"%\"\n return round((abs((hc-lc)/hc)*100),2)\n\n # Validate Moving averages and look for buy/sell signals\n def validateMovingAverages(data, dict, saveDict, range=2.5):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n if(recent['SMA'][0] > recent['LMA'][0] and recent['Close'][0] > recent['SMA'][0]):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'Bullish' + colorText.END\n saveDict['MA-Signal'] = 'Bullish'\n elif(recent['SMA'][0] < recent['LMA'][0]):\n 
dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'Bearish' + colorText.END\n saveDict['MA-Signal'] = 'Bearish'\n else:\n dict['MA-Signal'] = colorText.BOLD + colorText.WARN + 'Neutral' + colorText.END\n saveDict['MA-Signal'] = 'Neutral'\n\n smaDev = data['SMA'][0] * range / 100\n lmaDev = data['LMA'][0] * range / 100\n open, high, low, close, sma, lma = data['Open'][0], data['High'][0], data['Low'][0], data['Close'][0], data['SMA'][0], data['LMA'][0]\n maReversal = 0\n # Taking Support 50\n if close > sma and low <= (sma + smaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + '50MA-Support' + colorText.END\n saveDict['MA-Signal'] = '50MA-Support'\n maReversal = 1\n # Validating Resistance 50\n elif close < sma and high >= (sma - smaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + '50MA-Resist' + colorText.END\n saveDict['MA-Signal'] = '50MA-Resist'\n maReversal = -1\n # Taking Support 200\n elif close > lma and low <= (lma + lmaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + '200MA-Support' + colorText.END\n saveDict['MA-Signal'] = '200MA-Support'\n maReversal = 1\n # Validating Resistance 200\n elif close < lma and high >= (lma - lmaDev):\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + '200MA-Resist' + colorText.END\n saveDict['MA-Signal'] = '200MA-Resist'\n maReversal = -1\n # For a Bullish Candle\n if tools.getCandleType(data):\n # Crossing up 50\n if open < sma and close > sma:\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'BullCross-50MA' + colorText.END\n saveDict['MA-Signal'] = 'BullCross-50MA'\n maReversal = 1 \n # Crossing up 200\n elif open < lma and close > lma:\n dict['MA-Signal'] = colorText.BOLD + colorText.GREEN + 'BullCross-200MA' + colorText.END\n saveDict['MA-Signal'] = 'BullCross-200MA'\n maReversal = 1\n # For a Bearish Candle\n elif not tools.getCandleType(data):\n # Crossing down 50\n if open > sma and close < sma:\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'BearCross-50MA' + colorText.END\n saveDict['MA-Signal'] = 'BearCross-50MA'\n maReversal = -1 \n # Crossing up 200\n elif open > lma and close < lma:\n dict['MA-Signal'] = colorText.BOLD + colorText.FAIL + 'BearCross-200MA' + colorText.END\n saveDict['MA-Signal'] = 'BearCross-200MA'\n maReversal = -1\n return maReversal\n\n # Validate if volume of last day is higher than avg\n def validateVolume(data, dict, saveDict, volumeRatio=2.5):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n ratio = round(recent['Volume'][0]/recent['VolMA'][0],2)\n saveDict['Volume'] = str(ratio)+\"x\"\n if(ratio >= volumeRatio and ratio != np.nan and (not math.isinf(ratio)) and (ratio != 20)):\n dict['Volume'] = colorText.BOLD + colorText.GREEN + str(ratio) + \"x\" + colorText.END\n return True\n else:\n dict['Volume'] = colorText.BOLD + colorText.FAIL + str(ratio) + \"x\" + colorText.END\n return False\n\n # Find accurate breakout value\n def findBreakout(data, dict, saveDict, daysToLookback):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n recent = data.head(1)\n data = data[1:]\n hs = round(data.describe()['High']['max'],2)\n hc = round(data.describe()['Close']['max'],2)\n rc = round(recent['Close'][0],2)\n if hs > hc:\n if ((hs - hc) <= (hs*2/100)):\n saveDict['Breaking-Out'] = str(hc)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + 
\"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return False\n else: \n noOfHigherShadows = len(data[data.High > hc])\n if(daysToLookback/noOfHigherShadows <= 3):\n saveDict['Breaking-Out'] = str(hs)\n if rc >= hs:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hs) + colorText.END\n return False\n else:\n saveDict['Breaking-Out'] = str(hc) + \", \" + str(hs)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hc) + \" R: \" + str(hs) + colorText.END\n return False\n else:\n saveDict['Breaking-Out'] = str(hc)\n if rc >= hc:\n dict['Breaking-Out'] = colorText.BOLD + colorText.GREEN + \"BO: \" + str(hc) + colorText.END\n return True\n else:\n dict['Breaking-Out'] = colorText.BOLD + colorText.FAIL + \"BO: \" + str(hc) + colorText.END\n return False\n\n # Validate 'Inside Bar' structure for recent days\n def validateInsideBar(data, dict, saveDict, daysToLookback=4):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n data = data.head(daysToLookback)\n lowsData = data.sort_values(by=['Low'], ascending=False)\n highsData = data.sort_values(by=['High'], ascending=True)\n if(highsData.equals(lowsData)):\n dict['Pattern'] = colorText.BOLD + colorText.GREEN + (\"Inside Bar (%d days)\" % daysToLookback) + colorText.END\n saveDict['Pattern'] = \"Inside Bar (%d days)\" % daysToLookback\n return True\n dict['Pattern'] = ''\n saveDict['Pattern'] = ''\n return False\n\n # Validate if recent volume is lowest of last 'N' Days\n def validateLowestVolume(data, daysForLowestVolume):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n if daysForLowestVolume == None:\n daysForLowestVolume = 30\n data = data.head(daysForLowestVolume)\n recent = data.head(1)\n if((recent['Volume'][0] <= data.describe()['Volume']['min']) and recent['Volume'][0] != np.nan):\n return True\n return False\n\n # validate if RSI is within given range\n def validateRSI(data, dict, saveDict, minRSI, maxRSI):\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n rsi = int(data.head(1)['RSI'][0])\n saveDict['RSI'] = rsi\n if(rsi >= minRSI and rsi <= maxRSI) and (rsi <= 70 and rsi >= 30):\n dict['RSI'] = colorText.BOLD + colorText.GREEN + str(rsi) + colorText.END\n return True\n dict['RSI'] = colorText.BOLD + colorText.FAIL + str(rsi) + colorText.END\n return False\n\n # Find out trend for days to lookback\n def findTrend(data, dict, saveDict, daysToLookback=ConfigManager.daysToLookback,stockName=\"\"):\n data = data.head(daysToLookback)\n data = data[::-1]\n data = data.set_index(np.arange(len(data)))\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n with SuppressOutput(suppress_stdout=True,suppress_stderr=True):\n data['tops'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.greater_equal, order=1)[0])]\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n try:\n try:\n if len(data) < daysToLookback:\n raise StockDataNotAdequate\n slope,c = np.polyfit(data.index[data.tops > 0], data['tops'][data.tops > 0], 1)\n except Exception as e:\n slope,c = 0,0\n angle = np.rad2deg(np.arctan(slope))\n if (angle == 0):\n dict['Trend'] = colorText.BOLD + \"Unknown\" + colorText.END\n saveDict['Trend'] = 'Unknown'\n elif (angle 
<= 30 and angle >= -30):\n dict['Trend'] = colorText.BOLD + colorText.WARN + \"Sideways\" + colorText.END\n saveDict['Trend'] = 'Sideways'\n elif (angle >= 30 and angle < 61):\n dict['Trend'] = colorText.BOLD + colorText.GREEN + \"Weak Up\" + colorText.END\n saveDict['Trend'] = 'Weak Up'\n elif angle >= 60:\n dict['Trend'] = colorText.BOLD + colorText.GREEN + \"Strong Up\" + colorText.END\n saveDict['Trend'] = 'Strong Up'\n elif (angle >= -30 and angle < -61):\n dict['Trend'] = colorText.BOLD + colorText.FAIL + \"Weak Down\" + colorText.END\n saveDict['Trend'] = 'Weak Down'\n elif angle <= -60:\n dict['Trend'] = colorText.BOLD + colorText.FAIL + \"Strong Down\" + colorText.END\n saveDict['Trend'] = 'Strong Down'\n except np.linalg.LinAlgError:\n dict['Trend'] = colorText.BOLD + \"Unknown\" + colorText.END\n saveDict['Trend'] = 'Unknown'\n return saveDict['Trend']\n \n # Debugging - Experiment with data\n # import matplotlib.pyplot as plt\n # print(saveDict['Trend'])\n # print(slope)\n # print(math.degrees(math.atan(slope)))\n # plt.scatter(data.index[data.tops > 0], data['tops'][data.tops > 0], c='r')\n # plt.plot(data.index, data['Close'])\n # plt.plot(data.index, slope*data.index+c,)\n # plt.show()\n\n '''\n # Find out trend for days to lookback\n def validateVCP(data, dict, saveDict, daysToLookback=ConfigManager.daysToLookback, stockName=None):\n data = data.head(daysToLookback)\n data = data[::-1]\n data = data.set_index(np.arange(len(data)))\n data = data.fillna(0)\n data = data.replace([np.inf, -np.inf], 0)\n data['tops'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.greater_equal, order=3)[0])]\n data['bots'] = data['Close'].iloc[list(argrelextrema(np.array(data['Close']), np.less_equal, order=3)[0])]\n try:\n try:\n top_slope,top_c = np.polyfit(data.index[data.tops > 0], data['tops'][data.tops > 0], 1)\n bot_slope,bot_c = np.polyfit(data.index[data.bots > 0], data['bots'][data.bots > 0], 1)\n topAngle = math.degrees(math.atan(top_slope))\n vcpAngle = math.degrees(math.atan(bot_slope) - math.atan(top_slope))\n\n # print(math.degrees(math.atan(top_slope)))\n # print(math.degrees(math.atan(bot_slope)))\n # print(vcpAngle)\n # print(topAngle)\n # print(data.max()['bots'])\n # print(data.max()['tops'])\n if (vcpAngle > 20 and vcpAngle < 70) and (topAngle > -10 and topAngle < 10) and (data['bots'].max() <= data['tops'].max()) and (len(data['bots'][data.bots > 0]) > 1):\n print(\"---> GOOD VCP %s at %sRs\" % (stockName, top_c))\n import os\n os.system(\"echo %s >> vcp_plots\\VCP.txt\" % stockName)\n\n import matplotlib.pyplot as plt \n plt.scatter(data.index[data.tops > 0], data['tops'][data.tops > 0], c='g')\n plt.scatter(data.index[data.bots > 0], data['bots'][data.bots > 0], c='r')\n plt.plot(data.index, data['Close'])\n plt.plot(data.index, top_slope*data.index+top_c,'g--')\n plt.plot(data.index, bot_slope*data.index+bot_c,'r--')\n if stockName != None:\n plt.title(stockName)\n # plt.show()\n plt.savefig('vcp_plots\\%s.png' % stockName)\n plt.clf()\n except np.RankWarning:\n pass\n except np.linalg.LinAlgError:\n return False\n '''\n \n"
] |
[
[
"numpy.polyfit",
"numpy.array",
"numpy.arctan"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ArneRustad/Master-thesis-cf
|
[
"23b993b2877ff1506896c4181c4151578091b602"
] |
[
"run_hp_idun2.py"
] |
[
"print(\"Starting hyperparameter tuning on Idun\")\r\nimport os\r\nimport helpers.hp_tuning.hp_gen\r\nfrom tabGAN import TabGAN\r\nfrom src import constants as const\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nn_epochs = 100\r\nn_critic = 10\r\nopt_lr = 0.0002\r\nadam_beta1 = 0.5\r\nnoise_discrete_unif_max = 0\r\nbatch_size = 500\r\nprogress_bar_subsubprocess = False\r\njit_compile_train_step = False\r\n\r\nconst.dir.storage = lambda: \"/cluster/work/arneir\"\r\nprint(\"Storage dir:\", const.dir.storage())\r\n\r\ndataset_train_path = os.path.join(const.dir.data(), \"df_adult_edited_train.csv\")\r\ndataset_test_path = os.path.join(const.dir.data(), \"df_adult_edited_test.csv\")\r\n\r\ndata_train = pd.read_csv(dataset_train_path)\r\ndata_test = pd.read_csv(dataset_test_path)\r\ndiscrete_columns = data_train.columns[data_train.dtypes == \"object\"]\r\n\r\nactivation_function_vec = [(\"LeakyReLU\", False), (\"GELU\", False), (\"GELU\", True)]\r\nn_synthetic_datasets_activation_function_comparison = 10\r\nn_epochs_activation_function = 100\r\n\r\ndef create_tabGAN_for_activation_function(activation_function, approximate):\r\n tg_qtr = TabGAN(data_train, n_critic = n_critic, opt_lr = opt_lr, adam_beta1 = adam_beta1,\r\n quantile_transformation_int = True, quantile_rand_transformation = True,\r\n noise_discrete_unif_max = noise_discrete_unif_max, tf_data_use=True,\r\n activation_function=activation_function, gelu_approximate=approximate)\r\n return tg_qtr\r\n\r\nhelpers.hp_tuning.generate_multiple_datasets_for_multiple_hyperparameters(\r\n create_tabGAN_func=create_tabGAN_for_activation_function,\r\n hyperparams_vec=activation_function_vec,\r\n n_epochs=n_epochs_activation_function,\r\n dataset_dir=const.dir.hyperparams_tuning(),\r\n batch_size=batch_size,\r\n subfolder=\"tabGAN-qtr\",\r\n n_synthetic_datasets=n_synthetic_datasets_activation_function_comparison,\r\n restart=False,\r\n redo_hyperparams_vec = [],\r\n hyperparams_name = \"activation\",\r\n hyperparams_subname=[\"function\", \"approximate\"],\r\n add_comparison_folder=True,\r\n overwrite_dataset=False,\r\n progress_bar_subprocess=True,\r\n progress_bar_subsubprocess=progress_bar_subsubprocess\r\n)\r\n\r\n\r\n\r\nctgan_vec = [(False, False, False, False)]\r\nctgan_vec += [(bin_loss, True, log_freq, add_connection)\r\n for bin_loss in [False, True]\r\n for log_freq in [False, True]\r\n for add_connection in [False, True]]\r\nn_synthetic_datasets_ctgan_comparison = 25\r\nn_epochs_ctgan = 100\r\n\r\ndef create_tabGAN_for_ctgan(ctgan, ctgan_log_frequency, ctgan_binomial_loss, add_connection_query_to_discrete):\r\n if ctgan:\r\n tf_data_use=False\r\n else:\r\n tf_data_use=True\r\n tg_qtr = TabGAN(data_train, n_critic = n_critic, opt_lr = opt_lr, adam_beta1 = adam_beta1,\r\n quantile_transformation_int = True, quantile_rand_transformation = True,\r\n noise_discrete_unif_max = noise_discrete_unif_max, tf_data_use=tf_data_use,\r\n ctgan=ctgan, ctgan_log_frequency=ctgan_log_frequency,\r\n ctgan_binomial_loss=ctgan_binomial_loss,\r\n add_connection_query_to_discrete=add_connection_query_to_discrete)\r\n return tg_qtr\r\n\r\nhelpers.hp_tuning.generate_multiple_datasets_for_multiple_hyperparameters(\r\n create_tabGAN_func=create_tabGAN_for_ctgan,\r\n hyperparams_vec=ctgan_vec,\r\n n_epochs=n_epochs_ctgan,\r\n dataset_dir=const.dir.hyperparams_tuning(),\r\n batch_size=batch_size,\r\n subfolder=\"tabGAN-qtr\",\r\n n_synthetic_datasets=n_synthetic_datasets_ctgan_comparison,\r\n restart = True,\r\n redo_hyperparams_vec = [],\r\n 
hyperparams_name = \"categorical_query\",\r\n hyperparams_subname=[\"ctgan_binomial_loss\", \"ctgan\", \"log_frequency\", \"add_connection_query_to_discrete\"],\r\n add_comparison_folder=True,\r\n overwrite_dataset=False,\r\n progress_bar_subprocess=True,\r\n progress_bar_subsubprocess=progress_bar_subsubprocess\r\n)"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
REMeyer/astropy
|
[
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6",
"28c49fb618538a01812e586cd07bccdf0591a6c6"
] |
[
"astropy/table/pprint.py",
"astropy/modeling/core.py",
"astropy/table/tests/test_info.py",
"astropy/io/ascii/tests/test_c_reader.py",
"astropy/coordinates/transformations.py",
"astropy/modeling/tests/test_core.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom ..extern import six\nfrom ..extern.six import text_type\nfrom ..extern.six.moves import zip, range\n\nimport os\nimport sys\nimport re\n\nimport numpy as np\n\nfrom .. import log\nfrom ..utils.console import Getch, color_print, terminal_size, conf\nfrom ..utils.data_info import dtype_info_name\n\n__all__ = []\n\n\ndef default_format_func(format_, val):\n if isinstance(val, bytes):\n return val.decode('utf-8', errors='replace')\n else:\n return text_type(val)\n\n\n# The first three functions are helpers for _auto_format_func\n\ndef _use_str_for_masked_values(format_func):\n \"\"\"Wrap format function to trap masked values.\n\n String format functions and most user functions will not be able to deal\n with masked values, so we wrap them to ensure they are passed to str().\n \"\"\"\n return lambda format_, val: (str(val) if val is np.ma.masked\n else format_func(format_, val))\n\n\ndef _possible_string_format_functions(format_):\n \"\"\"Iterate through possible string-derived format functions.\n\n A string can either be a format specifier for the format built-in,\n a new-style format string, or an old-style format string.\n \"\"\"\n yield lambda format_, val: format(val, format_)\n yield lambda format_, val: format_.format(val)\n yield lambda format_, val: format_ % val\n\n\ndef get_auto_format_func(\n col=None,\n possible_string_format_functions=_possible_string_format_functions):\n \"\"\"\n Return a wrapped ``auto_format_func`` function which is used in\n formatting table columns. This is primarily an internal function but\n gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.\n\n Parameters\n ----------\n col_name : object, optional\n Hashable object to identify column like id or name. Default is None.\n\n possible_string_format_functions : func, optional\n Function that yields possible string formatting functions\n (defaults to internal function to do this).\n\n Returns\n -------\n Wrapped ``auto_format_func`` function\n \"\"\"\n\n def _auto_format_func(format_, val):\n \"\"\"Format ``val`` according to ``format_`` for a plain format specifier,\n old- or new-style format strings, or using a user supplied function.\n More importantly, determine and cache (in _format_funcs) a function\n that will do this subsequently. In this way this complicated logic is\n only done for the first value.\n\n Returns the formatted value.\n \"\"\"\n if format_ is None:\n return default_format_func(format_, val)\n\n if format_ in col.info._format_funcs:\n return col.info._format_funcs[format_](format_, val)\n\n if six.callable(format_):\n format_func = lambda format_, val: format_(val)\n try:\n out = format_func(format_, val)\n if not isinstance(out, six.string_types):\n raise ValueError('Format function for value {0} returned {1} '\n 'instead of string type'\n .format(val, type(val)))\n except Exception as err:\n # For a masked element, the format function call likely failed\n # to handle it. Just return the string representation for now,\n # and retry when a non-masked value comes along.\n if val is np.ma.masked:\n return str(val)\n\n raise ValueError('Format function for value {0} failed: {1}'\n .format(val, err))\n # If the user-supplied function handles formatting masked elements, use\n # it directly. 
Otherwise, wrap it in a function that traps them.\n try:\n format_func(format_, np.ma.masked)\n except Exception:\n format_func = _use_str_for_masked_values(format_func)\n else:\n # For a masked element, we cannot set string-based format functions yet,\n # as all tests below will fail. Just return the string representation\n # of masked for now, and retry when a non-masked value comes along.\n if val is np.ma.masked:\n return str(val)\n\n for format_func in possible_string_format_functions(format_):\n try:\n # Does this string format method work?\n out = format_func(format_, val)\n # Require that the format statement actually did something.\n if out == format_:\n raise ValueError('the format passed in did nothing.')\n except Exception:\n continue\n else:\n break\n else:\n # None of the possible string functions passed muster.\n raise ValueError('Unable to parse format string {0}'\n .format(format_))\n\n # String-based format functions will fail on masked elements;\n # wrap them in a function that traps them.\n format_func = _use_str_for_masked_values(format_func)\n\n col.info._format_funcs[format_] = format_func\n return out\n\n return _auto_format_func\n\n\nclass TableFormatter(object):\n @staticmethod\n def _get_pprint_size(max_lines=None, max_width=None):\n \"\"\"Get the output size (number of lines and character width) for Column and\n Table pformat/pprint methods.\n\n If no value of ``max_lines`` is supplied then the height of the\n screen terminal is used to set ``max_lines``. If the terminal\n height cannot be determined then the default will be determined\n using the ``astropy.table.conf.max_lines`` configuration item. If a\n negative value of ``max_lines`` is supplied then there is no line\n limit applied.\n\n The same applies for max_width except the configuration item is\n ``astropy.table.conf.max_width``.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum lines of output (header + data rows)\n\n max_width : int or None\n Maximum width (characters) output\n\n Returns\n -------\n max_lines, max_width : int\n\n \"\"\"\n if max_lines is None:\n max_lines = conf.max_lines\n\n if max_width is None:\n max_width = conf.max_width\n\n if max_lines is None or max_width is None:\n lines, width = terminal_size()\n\n if max_lines is None:\n max_lines = lines\n elif max_lines < 0:\n max_lines = sys.maxsize\n if max_lines < 8:\n max_lines = 8\n\n if max_width is None:\n max_width = width\n elif max_width < 0:\n max_width = sys.maxsize\n if max_width < 10:\n max_width = 10\n\n return max_lines, max_width\n\n def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,\n show_dtype=False, show_length=None, html=False, align=None):\n \"\"\"Return a list of formatted string representation of column values.\n\n Parameters\n ----------\n max_lines : int\n Maximum lines of output (header + data rows)\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include column dtype. Default is False.\n\n show_length : bool\n Include column length at end. Default is to show this only\n if the column is not shown completely.\n\n html : bool\n Output column as HTML\n\n align : str\n Left/right alignment of columns. Default is '>' (right) for all\n columns. 
Other allowed values are '<', '^', and '0=' for left,\n centered, and 0-padded, respectively.\n\n Returns\n -------\n lines : list\n List of lines with formatted column values\n\n outs : dict\n Dict which is used to pass back additional values\n defined within the iterator.\n\n \"\"\"\n if show_unit is None:\n show_unit = col.info.unit is not None\n\n outs = {} # Some values from _pformat_col_iter iterator that are needed here\n col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,\n show_unit=show_unit,\n show_dtype=show_dtype,\n show_length=show_length,\n outs=outs)\n col_strs = list(col_strs_iter)\n if len(col_strs) > 0:\n col_width = max(len(x) for x in col_strs)\n\n if html:\n from ..utils.xml.writer import xml_escape\n n_header = outs['n_header']\n for i, col_str in enumerate(col_strs):\n # _pformat_col output has a header line '----' which is not needed here\n if i == n_header - 1:\n continue\n td = 'th' if i < n_header else 'td'\n val = '<{0}>{1}</{2}>'.format(td, xml_escape(col_str.strip()), td)\n row = ('<tr>' + val + '</tr>')\n if i < n_header:\n row = ('<thead>' + row + '</thead>')\n col_strs[i] = row\n\n if n_header > 0:\n # Get rid of '---' header line\n col_strs.pop(n_header - 1)\n col_strs.insert(0, '<table>')\n col_strs.append('</table>')\n\n # Now bring all the column string values to the same fixed width\n else:\n col_width = max(len(x) for x in col_strs) if col_strs else 1\n\n # Center line header content and generate dashed headerline\n for i in outs['i_centers']:\n col_strs[i] = col_strs[i].center(col_width)\n if outs['i_dashes'] is not None:\n col_strs[outs['i_dashes']] = '-' * col_width\n\n # Format columns according to alignment. `align` arg has precedent, otherwise\n # use `col.format` if it starts as a legal alignment string. If neither applies\n # then right justify.\n re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')\n match = None\n if align:\n # If there is an align specified then it must match\n match = re_fill_align.match(align)\n if not match:\n raise ValueError(\"column align must be one of '<', '^', '>', or '='\")\n elif isinstance(col.info.format, six.string_types):\n # col.info.format need not match, in which case rjust gets used\n match = re_fill_align.match(col.info.format)\n\n if match:\n fill_char = match.group('fill')\n align_char = match.group('align')\n if align_char == '=':\n if fill_char != '0':\n raise ValueError(\"fill character must be '0' for '=' align\")\n fill_char = '' # str.zfill gets used which does not take fill char arg\n else:\n fill_char = ''\n align_char = '>'\n\n justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}\n justify_method = justify_methods[align_char]\n justify_args = (col_width, fill_char) if fill_char else (col_width,)\n\n for i, col_str in enumerate(col_strs):\n col_strs[i] = getattr(col_str, justify_method)(*justify_args)\n\n if outs['show_length']:\n col_strs.append('Length = {0} rows'.format(len(col)))\n\n return col_strs, outs\n\n def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,\n show_dtype=False, show_length=None):\n \"\"\"Iterator which yields formatted string representation of column values.\n\n Parameters\n ----------\n max_lines : int\n Maximum lines of output (header + data rows)\n\n show_name : bool\n Include column name. Default is True.\n\n show_unit : bool\n Include a header row for unit. 
Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n outs : dict\n Must be a dict which is used to pass back additional values\n defined within the iterator.\n\n show_dtype : bool\n Include column dtype. Default is False.\n\n show_length : bool\n Include column length at end. Default is to show this only\n if the column is not shown completely.\n \"\"\"\n max_lines, _ = self._get_pprint_size(max_lines, -1)\n\n multidims = getattr(col, 'shape', [0])[1:]\n if multidims:\n multidim0 = tuple(0 for n in multidims)\n multidim1 = tuple(n - 1 for n in multidims)\n trivial_multidims = np.prod(multidims) == 1\n\n i_dashes = None\n i_centers = [] # Line indexes where content should be centered\n n_header = 0\n if show_name:\n i_centers.append(n_header)\n # Get column name (or 'None' if not set)\n col_name = six.text_type(col.info.name)\n if multidims:\n col_name += ' [{0}]'.format(\n ','.join(six.text_type(n) for n in multidims))\n n_header += 1\n yield col_name\n if show_unit:\n i_centers.append(n_header)\n n_header += 1\n yield six.text_type(col.info.unit or '')\n if show_dtype:\n i_centers.append(n_header)\n n_header += 1\n try:\n dtype = dtype_info_name(col.dtype)\n except AttributeError:\n dtype = 'object'\n yield six.text_type(dtype)\n if show_unit or show_name or show_dtype:\n i_dashes = n_header\n n_header += 1\n yield '---'\n\n max_lines -= n_header\n n_print2 = max_lines // 2\n n_rows = len(col)\n\n # This block of code is responsible for producing the function that\n # will format values for this column. The ``format_func`` function\n # takes two args (col_format, val) and returns the string-formatted\n # version. Some points to understand:\n #\n # - col_format could itself be the formatting function, so it will\n # actually end up being called with itself as the first arg. In\n # this case the function is expected to ignore its first arg.\n #\n # - auto_format_func is a function that gets called on the first\n # column value that is being formatted. It then determines an\n # appropriate formatting function given the actual value to be\n # formatted. This might be deterministic or it might involve\n # try/except. The latter allows for different string formatting\n # options like %f or {:5.3f}. When auto_format_func is called it:\n\n # 1. Caches the function in the _format_funcs dict so for subsequent\n # values the right function is called right away.\n # 2. Returns the formatted value.\n #\n # - possible_string_format_functions is a function that yields a\n # succession of functions that might successfully format the\n # value. 
There is a default, but Mixin methods can override this.\n # See Quantity for an example.\n #\n # - get_auto_format_func() returns a wrapped version of auto_format_func\n # with the column id and possible_string_format_functions as\n # enclosed variables.\n col_format = col.info.format or getattr(col.info, 'default_format', None)\n pssf = (getattr(col.info, 'possible_string_format_functions', None) or\n _possible_string_format_functions)\n auto_format_func = get_auto_format_func(col, pssf)\n format_func = col.info._format_funcs.get(col_format, auto_format_func)\n\n if len(col) > max_lines:\n if show_length is None:\n show_length = True\n i0 = n_print2 - (1 if show_length else 0)\n i1 = n_rows - n_print2 - max_lines % 2\n ii = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))])\n else:\n i0 = -1\n ii = np.arange(len(col))\n\n # Add formatted values if within bounds allowed by max_lines\n for i in ii:\n if i == i0:\n yield '...'\n else:\n if multidims:\n # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')\n # with shape (n,1,...,1) from being printed as if there was\n # more than one element in a row\n if trivial_multidims:\n col_str = format_func(col_format, col[(i,) + multidim0])\n else:\n col_str = (format_func(col_format, col[(i,) + multidim0]) +\n ' .. ' +\n format_func(col_format, col[(i,) + multidim1]))\n else:\n col_str = format_func(col_format, col[i])\n yield col_str\n\n outs['show_length'] = show_length\n outs['n_header'] = n_header\n outs['i_centers'] = i_centers\n outs['i_dashes'] = i_dashes\n\n def _pformat_table(self, table, max_lines=None, max_width=None,\n show_name=True, show_unit=None, show_dtype=False,\n html=False, tableid=None, tableclass=None, align=None):\n \"\"\"Return a list of lines for the formatted string representation of\n the table.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum number of rows to output\n\n max_width : int or None\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. Default is False.\n\n html : bool\n Format the output as an HTML table. Default is False.\n\n tableid : str or None\n An ID tag for the table; only used if html is set. Default is\n \"table{id}\", where id is the unique integer id of the table object,\n id(table)\n\n tableclass : str or list of str or `None`\n CSS classes for the table; only used if html is set. Default is\n none\n\n align : str or list or tuple\n Left/right alignment of columns. Default is '>' (right) for all\n columns. Other allowed values are '<', '^', and '0=' for left,\n centered, and 0-padded, respectively. 
A list of strings can be\n provided for alignment of tables with multiple columns.\n\n Returns\n -------\n rows : list\n Formatted table as a list of strings\n\n outs : dict\n Dict which is used to pass back additional values\n defined within the iterator.\n\n \"\"\"\n # \"Print\" all the values into temporary lists by column for subsequent\n # use and to determine the width\n max_lines, max_width = self._get_pprint_size(max_lines, max_width)\n cols = []\n\n if show_unit is None:\n show_unit = any(col.info.unit for col in six.itervalues(table.columns))\n\n # Coerce align into a correctly-sized list of alignments (if possible)\n n_cols = len(table.columns)\n if align is None or isinstance(align, six.string_types):\n align = [align] * n_cols\n\n elif isinstance(align, (list, tuple)):\n if len(align) != n_cols:\n raise ValueError('got {0} alignment values instead of '\n 'the number of columns ({1})'\n .format(len(align), n_cols))\n else:\n raise TypeError('align keyword must be str or list or tuple (got {0})'\n .format(type(align)))\n\n for align_, col in zip(align, table.columns.values()):\n lines, outs = self._pformat_col(col, max_lines, show_name=show_name,\n show_unit=show_unit, show_dtype=show_dtype,\n align=align_)\n if outs['show_length']:\n lines = lines[:-1]\n cols.append(lines)\n\n if not cols:\n return ['<No columns>'], {'show_length': False}\n\n # Use the values for the last column since they are all the same\n n_header = outs['n_header']\n\n n_rows = len(cols[0])\n outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1\n dots_col = ['...'] * n_rows\n middle = len(cols) // 2\n while outwidth(cols) > max_width:\n if len(cols) == 1:\n break\n if len(cols) == 2:\n cols[1] = dots_col\n break\n if cols[middle] is dots_col:\n cols.pop(middle)\n middle = len(cols) // 2\n cols[middle] = dots_col\n\n # Now \"print\" the (already-stringified) column values into a\n # row-oriented list.\n rows = []\n if html:\n from ..utils.xml.writer import xml_escape\n\n if tableid is None:\n tableid = 'table{id}'.format(id=id(table))\n\n if tableclass is not None:\n if isinstance(tableclass, list):\n tableclass = ' '.join(tableclass)\n rows.append('<table id=\"{tid}\" class=\"{tcls}\">'.format(\n tid=tableid, tcls=tableclass))\n else:\n rows.append('<table id=\"{tid}\">'.format(tid=tableid))\n\n for i in range(n_rows):\n # _pformat_col output has a header line '----' which is not needed here\n if i == n_header - 1:\n continue\n td = 'th' if i < n_header else 'td'\n vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td)\n for col in cols)\n row = ('<tr>' + ''.join(vals) + '</tr>')\n if i < n_header:\n row = ('<thead>' + row + '</thead>')\n rows.append(row)\n rows.append('</table>')\n else:\n for i in range(n_rows):\n row = ' '.join(col[i] for col in cols)\n rows.append(row)\n\n return rows, outs\n\n def _more_tabcol(self, tabcol, max_lines=None, max_width=None,\n show_name=True, show_unit=None, show_dtype=False):\n \"\"\"Interactive \"more\" of a table or column.\n\n Parameters\n ----------\n max_lines : int or None\n Maximum number of rows to output\n\n max_width : int or None\n Maximum character width of output\n\n show_name : bool\n Include a header row for column names. Default is True.\n\n show_unit : bool\n Include a header row for unit. Default is to show a row\n for units only if one or more columns has a defined value\n for the unit.\n\n show_dtype : bool\n Include a header row for column dtypes. 
Default is False.\n \"\"\"\n allowed_keys = 'f br<>qhpn'\n\n # Count the header lines\n n_header = 0\n if show_name:\n n_header += 1\n if show_unit:\n n_header += 1\n if show_dtype:\n n_header += 1\n if show_name or show_unit or show_dtype:\n n_header += 1\n\n # Set up kwargs for pformat call. Only Table gets max_width.\n kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,\n show_dtype=show_dtype)\n if hasattr(tabcol, 'columns'): # tabcol is a table\n kwargs['max_width'] = max_width\n\n # If max_lines is None (=> query screen size) then increase by 2.\n # This is because get_pprint_size leaves 6 extra lines so that in\n # ipython you normally see the last input line.\n max_lines1, max_width = self._get_pprint_size(max_lines, max_width)\n if max_lines is None:\n max_lines1 += 2\n delta_lines = max_lines1 - n_header\n\n # Set up a function to get a single character on any platform\n inkey = Getch()\n\n i0 = 0 # First table/column row to show\n showlines = True\n while True:\n i1 = i0 + delta_lines # Last table/col row to show\n if showlines: # Don't always show the table (e.g. after help)\n try:\n os.system('cls' if os.name == 'nt' else 'clear')\n except Exception:\n pass # No worries if clear screen call fails\n lines = tabcol[i0:i1].pformat(**kwargs)\n colors = ('red' if i < n_header else 'default'\n for i in range(len(lines)))\n for color, line in zip(colors, lines):\n color_print(line, color)\n showlines = True\n print()\n print(\"-- f, <space>, b, r, p, n, <, >, q h (help) --\", end=' ')\n # Get a valid key\n while True:\n try:\n key = inkey().lower()\n except Exception:\n print(\"\\n\")\n log.error('Console does not support getting a character'\n ' as required by more(). Use pprint() instead.')\n return\n if key in allowed_keys:\n break\n print(key)\n\n if key.lower() == 'q':\n break\n elif key == ' ' or key == 'f':\n i0 += delta_lines\n elif key == 'b':\n i0 = i0 - delta_lines\n elif key == 'r':\n pass\n elif key == '<':\n i0 = 0\n elif key == '>':\n i0 = len(tabcol)\n elif key == 'p':\n i0 -= 1\n elif key == 'n':\n i0 += 1\n elif key == 'h':\n showlines = False\n print(\"\"\"\n Browsing keys:\n f, <space> : forward one page\n b : back one page\n r : refresh same page\n n : next row\n p : previous row\n < : go to beginning\n > : go to end\n q : quit browsing\n h : print this help\"\"\", end=' ')\n if i0 < 0:\n i0 = 0\n if i0 >= len(tabcol) - delta_lines:\n i0 = len(tabcol) - delta_lines\n print(\"\\n\")\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module defines base classes for all models. The base class of all\nmodels is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is\nthe base class for all fittable models. Fittable models can be linear or\nnonlinear in a regression analysis sense.\n\nAll models provide a `__call__` method which performs the transformation in\na purely mathematical way, i.e. the models are unitless. Model instances can\nrepresent either a single model, or a \"model set\" representing multiple copies\nof the same type of model, but with potentially different values of the\nparameters in each model making up the set.\n\"\"\"\n\nfrom __future__ import (absolute_import, unicode_literals, division,\n print_function)\n\nimport abc\nimport copy\nimport inspect\nimport functools\nimport operator\nimport sys\nimport types\n\nfrom collections import defaultdict, OrderedDict\nfrom itertools import chain, islice\n\nimport numpy as np\n\nfrom ..utils import indent, isinstancemethod, metadata\nfrom ..extern import six\nfrom ..extern.six.moves import copyreg, zip\nfrom ..table import Table\nfrom ..units import Quantity, UnitsError, dimensionless_unscaled\nfrom ..units.utils import quantity_asanyarray\nfrom ..utils import (sharedmethod, find_current_module,\n InheritDocstrings, OrderedDescriptorContainer,\n check_broadcast, IncompatibleShapeError, isiterable)\nfrom ..utils.codegen import make_function_with_signature\nfrom ..utils.compat import suppress\nfrom ..utils.compat.funcsigs import signature\nfrom .utils import (combine_labels, make_binary_operator_eval,\n ExpressionTree, AliasDict, get_inputs_and_params,\n _BoundingBox, _combine_equivalency_dict)\nfrom ..nddata.utils import add_array, extract_array\n\nfrom .parameters import Parameter, InputParameterError, param_repr_oneline\n\n\n__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',\n 'custom_model', 'ModelDefinitionError']\n\n\nclass ModelDefinitionError(TypeError):\n \"\"\"Used for incorrect models definitions\"\"\"\n\n\ndef _model_oper(oper, **kwargs):\n \"\"\"\n Returns a function that evaluates a given Python arithmetic operator\n between two models. The operator should be given as a string, like ``'+'``\n or ``'**'``.\n\n Any additional keyword arguments passed in are passed to\n `_CompoundModelMeta._from_operator`.\n \"\"\"\n\n # Note: Originally this used functools.partial, but that won't work when\n # used in the class definition of _CompoundModelMeta since\n # _CompoundModelMeta has not been defined yet.\n\n # Perform an arithmetic operation on two models.\n return lambda left, right: _CompoundModelMeta._from_operator(oper,\n left, right, **kwargs)\n\n\nclass _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):\n \"\"\"\n Metaclass for Model.\n\n Currently just handles auto-generating the param_names list based on\n Parameter descriptors declared at the class-level of Model subclasses.\n \"\"\"\n\n registry = set()\n \"\"\"\n A registry of all known concrete (non-abstract) Model subclasses.\n \"\"\"\n\n _is_dynamic = False\n \"\"\"\n This flag signifies whether this class was created in the \"normal\" way,\n with a class statement in the body of a module, as opposed to a call to\n `type` or some other metaclass constructor, such that the resulting class\n does not belong to a specific module. 
This is important for pickling of\n dynamic classes.\n\n This flag is always forced to False for new classes, so code that creates\n dynamic classes should manually set it to True on those classes when\n creating them.\n \"\"\"\n\n # Default empty dict for _parameters_, which will be empty on model\n # classes that don't have any Parameters\n _parameters_ = OrderedDict()\n\n def __new__(mcls, name, bases, members):\n # See the docstring for _is_dynamic above\n if '_is_dynamic' not in members:\n members['_is_dynamic'] = mcls._is_dynamic\n\n return super(_ModelMeta, mcls).__new__(mcls, name, bases, members)\n\n def __init__(cls, name, bases, members):\n # Make sure OrderedDescriptorContainer gets to run before doing\n # anything else\n super(_ModelMeta, cls).__init__(name, bases, members)\n\n if cls._parameters_:\n if hasattr(cls, '_param_names'):\n # Slight kludge to support compound models, where\n # cls.param_names is a property; could be improved with a\n # little refactoring but fine for now\n cls._param_names = tuple(cls._parameters_)\n else:\n cls.param_names = tuple(cls._parameters_)\n\n cls._create_inverse_property(members)\n cls._create_bounding_box_property(members)\n cls._handle_special_methods(members)\n\n if not inspect.isabstract(cls) and not name.startswith('_'):\n cls.registry.add(cls)\n\n def __repr__(cls):\n \"\"\"\n Custom repr for Model subclasses.\n \"\"\"\n\n return cls._format_cls_repr()\n\n def _repr_pretty_(cls, p, cycle):\n \"\"\"\n Repr for IPython's pretty printer.\n\n By default IPython \"pretty prints\" classes, so we need to implement\n this so that IPython displays the custom repr for Models.\n \"\"\"\n\n p.text(repr(cls))\n\n def __reduce__(cls):\n if not cls._is_dynamic:\n # Just return a string specifying where the class can be imported\n # from\n return cls.__name__\n else:\n members = dict(cls.__dict__)\n # Delete any ABC-related attributes--these will be restored when\n # the class is reconstructed:\n for key in list(members):\n if key.startswith('_abc_'):\n del members[key]\n\n # Delete custom __init__ and __call__ if they exist:\n for key in ('__init__', '__call__'):\n if key in members:\n del members[key]\n\n return (type(cls), (cls.__name__, cls.__bases__, members))\n\n @property\n def name(cls):\n \"\"\"\n The name of this model class--equivalent to ``cls.__name__``.\n\n This attribute is provided for symmetry with the `Model.name` attribute\n of model instances.\n \"\"\"\n\n return cls.__name__\n\n @property\n def n_inputs(cls):\n return len(cls.inputs)\n\n @property\n def n_outputs(cls):\n return len(cls.outputs)\n\n @property\n def _is_concrete(cls):\n \"\"\"\n A class-level property that determines whether the class is a concrete\n implementation of a Model--i.e. it is not some abstract base class or\n internal implementation detail (i.e. begins with '_').\n \"\"\"\n return not (cls.__name__.startswith('_') or inspect.isabstract(cls))\n\n def rename(cls, name):\n \"\"\"\n Creates a copy of this model class with a new name.\n\n The new class is technically a subclass of the original class, so that\n instance and type checks will still work. 
For example::\n\n >>> from astropy.modeling.models import Rotation2D\n >>> SkyRotation = Rotation2D.rename('SkyRotation')\n >>> SkyRotation\n <class '__main__.SkyRotation'>\n Name: SkyRotation (Rotation2D)\n Inputs: ('x', 'y')\n Outputs: ('x', 'y')\n Fittable parameters: ('angle',)\n >>> issubclass(SkyRotation, Rotation2D)\n True\n >>> r = SkyRotation(90)\n >>> isinstance(r, Rotation2D)\n True\n \"\"\"\n\n if six.PY2 and isinstance(name, six.text_type):\n # Unicode names are not allowed in Python 2, so just convert to\n # ASCII. As such, for cross-compatibility all model names should\n # just be ASCII for now.\n name = name.encode('ascii')\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n new_cls = type(name, (cls,), {})\n # On Python 2 __module__ must be a str, not unicode\n new_cls.__module__ = str(modname)\n\n if hasattr(cls, '__qualname__'):\n if new_cls.__module__ == '__main__':\n # __main__ is not added to a class's qualified name\n new_cls.__qualname__ = name\n else:\n new_cls.__qualname__ = '{0}.{1}'.format(modname, name)\n\n return new_cls\n\n def _create_inverse_property(cls, members):\n inverse = members.get('inverse')\n if inverse is None or cls.__bases__[0] is object:\n # The latter clause is the prevent the below code from running on\n # the Model base class, which implements the default getter and\n # setter for .inverse\n return\n\n if isinstance(inverse, property):\n # We allow the @property decorator to be omitted entirely from\n # the class definition, though its use should be encouraged for\n # clarity\n inverse = inverse.fget\n\n # Store the inverse getter internally, then delete the given .inverse\n # attribute so that cls.inverse resolves to Model.inverse instead\n cls._inverse = inverse\n del cls.inverse\n\n def _create_bounding_box_property(cls, members):\n \"\"\"\n Takes any bounding_box defined on a concrete Model subclass (either\n as a fixed tuple or a property or method) and wraps it in the generic\n getter/setter interface for the bounding_box attribute.\n \"\"\"\n\n # TODO: Much of this is verbatim from _create_inverse_property--I feel\n # like there could be a way to generify properties that work this way,\n # but for the time being that would probably only confuse things more.\n bounding_box = members.get('bounding_box')\n if bounding_box is None or cls.__bases__[0] is object:\n return\n\n if isinstance(bounding_box, property):\n bounding_box = bounding_box.fget\n\n if not callable(bounding_box):\n # See if it's a hard-coded bounding_box (as a sequence) and\n # normalize it\n try:\n bounding_box = _BoundingBox.validate(cls, bounding_box)\n except ValueError as exc:\n raise ModelDefinitionError(exc.args[0])\n else:\n sig = signature(bounding_box)\n # May be a method that only takes 'self' as an argument (like a\n # property, but the @property decorator was forgotten)\n # TODO: Maybe warn in the above case?\n #\n # However, if the method takes additional arguments then this is a\n # parameterized bounding box and should be callable\n if len(sig.parameters) > 1:\n bounding_box = \\\n cls._create_bounding_box_subclass(bounding_box, sig)\n\n if six.PY2 and isinstance(bounding_box, types.MethodType):\n bounding_box = bounding_box.__func__\n\n # See the Model.bounding_box getter definition for how this attribute\n # is used\n cls._bounding_box = bounding_box\n del cls.bounding_box\n\n def _create_bounding_box_subclass(cls, func, sig):\n \"\"\"\n For Models that take optional arguments for defining their 
bounding\n box, we create a subclass of _BoundingBox with a ``__call__`` method\n that supports those additional arguments.\n\n Takes the function's Signature as an argument since that is already\n computed in _create_bounding_box_property, so no need to duplicate that\n effort.\n \"\"\"\n\n # TODO: Might be convenient if calling the bounding box also\n # automatically sets the _user_bounding_box. So that\n #\n # >>> model.bounding_box(arg=1)\n #\n # in addition to returning the computed bbox, also sets it, so that\n # it's a shortcut for\n #\n # >>> model.bounding_box = model.bounding_box(arg=1)\n #\n # Not sure if that would be non-obvious / confusing though...\n\n def __call__(self, **kwargs):\n return func(self._model, **kwargs)\n\n kwargs = []\n for idx, param in enumerate(sig.parameters.values()):\n if idx == 0:\n # Presumed to be a 'self' argument\n continue\n\n if param.default is param.empty:\n raise ModelDefinitionError(\n 'The bounding_box method for {0} is not correctly '\n 'defined: If defined as a method all arguments to that '\n 'method (besides self) must be keyword arguments with '\n 'default values that can be used to compute a default '\n 'bounding box.'.format(cls.name))\n\n kwargs.append((param.name, param.default))\n\n __call__ = make_function_with_signature(__call__, ('self',), kwargs)\n\n return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),\n {'__call__': __call__})\n\n def _handle_special_methods(cls, members):\n\n # Handle init creation from inputs\n def update_wrapper(wrapper, cls):\n # Set up the new __call__'s metadata attributes as though it were\n # manually defined in the class definition\n # A bit like functools.update_wrapper but uses the class instead of\n # the wrapped function\n wrapper.__module__ = cls.__module__\n wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__\n if hasattr(cls, '__qualname__'):\n wrapper.__qualname__ = '{0}.{1}'.format(\n cls.__qualname__, wrapper.__name__)\n\n if ('__call__' not in members and 'inputs' in members and\n isinstance(members['inputs'], tuple)):\n\n # Don't create a custom __call__ for classes that already have one\n # explicitly defined (this includes the Model base class, and any\n # other classes that manually override __call__\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"Evaluate this model on the supplied inputs.\"\"\"\n return super(cls, self).__call__(*inputs, **kwargs)\n\n # When called, models can take two optional keyword arguments:\n #\n # * model_set_axis, which indicates (for multi-dimensional input)\n # which axis is used to indicate different models\n #\n # * equivalencies, a dictionary of equivalencies to be applied to\n # the input values, where each key should correspond to one of\n # the inputs.\n #\n # The following code creates the __call__ function with these\n # two keyword arguments.\n inputs = members['inputs']\n args = ('self',) + inputs\n new_call = make_function_with_signature(\n __call__, args, [('model_set_axis', None),\n ('with_bounding_box', False),\n ('fill_value', np.nan),\n ('equivalencies', None)])\n\n # The following makes it look like __call__ was defined in the class\n update_wrapper(new_call, cls)\n\n cls.__call__ = new_call\n\n if ('__init__' not in members and not inspect.isabstract(cls) and\n cls._parameters_):\n\n # If *all* the parameters have default values we can make them\n # keyword arguments; otherwise they must all be positional arguments\n if all(p.default is not None for p in six.itervalues(cls._parameters_)):\n args = ('self',)\n kwargs 
= []\n for param_name in cls.param_names:\n default = cls._parameters_[param_name].default\n unit = cls._parameters_[param_name].unit\n # If the unit was specified in the parameter but the default\n # is not a Quantity, attach the unit to the default.\n if unit is not None:\n default = Quantity(default, unit, copy=False)\n kwargs.append((param_name, default))\n else:\n args = ('self',) + cls.param_names\n kwargs = {}\n\n def __init__(self, *params, **kwargs):\n return super(cls, self).__init__(*params, **kwargs)\n\n new_init = make_function_with_signature(\n __init__, args, kwargs, varkwargs='kwargs')\n update_wrapper(new_init, cls)\n cls.__init__ = new_init\n\n # *** Arithmetic operators for creating compound models ***\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n\n if six.PY2:\n # The classic __div__ operator need only be implemented for Python 2\n # without from __future__ import division\n __div__ = _model_oper('/')\n\n # *** Other utilities ***\n\n def _format_cls_repr(cls, keywords=[]):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n # For the sake of familiarity start the output with the standard class\n # __repr__\n parts = [super(_ModelMeta, cls).__repr__()]\n\n if not cls._is_concrete:\n return parts[0]\n\n def format_inheritance(cls):\n bases = []\n for base in cls.mro()[1:]:\n if not issubclass(base, Model):\n continue\n elif (inspect.isabstract(base) or\n base.__name__.startswith('_')):\n break\n bases.append(base.name)\n if bases:\n return '{0} ({1})'.format(cls.name, ' -> '.join(bases))\n else:\n return cls.name\n\n try:\n default_keywords = [\n ('Name', format_inheritance(cls)),\n ('Inputs', cls.inputs),\n ('Outputs', cls.outputs),\n ]\n\n if cls.param_names:\n default_keywords.append(('Fittable parameters',\n cls.param_names))\n\n for keyword, value in default_keywords + keywords:\n if value is not None:\n parts.append('{0}: {1}'.format(keyword, value))\n\n return '\\n'.join(parts)\n except Exception:\n # If any of the above formatting fails fall back on the basic repr\n # (this is particularly useful in debugging)\n return parts[0]\n\n\[email protected]_metaclass(_ModelMeta)\nclass Model(object):\n \"\"\"\n Base class for all models.\n\n This is an abstract class and should not be instantiated directly.\n\n This class sets the constraints and other properties for all individual\n parameters and performs parameter validation.\n\n The following initialization arguments apply to the majority of Model\n subclasses by default (exceptions include specialized utility models\n like `~astropy.modeling.mappings.Mapping`). Parametric models take all\n their parameters as arguments, followed by any of the following optional\n keyword arguments:\n\n Parameters\n ----------\n name : str, optional\n A human-friendly name associated with this model instance\n (particularly useful for identifying the individual components of a\n compound model).\n\n meta : dict, optional\n An optional dict of user-defined metadata to attach to this model.\n How this is used and interpreted is up to the user or individual use\n case.\n\n n_models : int, optional\n If given an integer greater than 1, a *model set* is instantiated\n instead of a single model. 
This affects how the parameter arguments\n are interpreted. In this case each parameter must be given as a list\n or array--elements of this array are taken along the first axis (or\n ``model_set_axis`` if specified), such that the Nth element is the\n value of that parameter for the Nth model in the set.\n\n See the section on model sets in the documentation for more details.\n\n model_set_axis : int, optional\n This argument only applies when creating a model set (i.e. ``n_models >\n 1``). It changes how parameter values are interpreted. Normally the\n first axis of each input parameter array (properly the 0th axis) is\n taken as the axis corresponding to the model sets. However, any axis\n of an input array may be taken as this \"model set axis\". This accepts\n negative integers as well--for example use ``model_set_axis=-1`` if the\n last (most rapidly changing) axis should be associated with the model\n sets. Also, ``model_set_axis=False`` can be used to tell that a given\n input should be used to evaluate all the models in the model set.\n\n fixed : dict, optional\n Dictionary ``{parameter_name: bool}`` setting the fixed constraint\n for one or more parameters. `True` means the parameter is held fixed\n during fitting and is prevented from updates once an instance of the\n model has been created.\n\n Alternatively the `~astropy.modeling.Parameter.fixed` property of a\n parameter may be used to lock or unlock individual parameters.\n\n tied : dict, optional\n Dictionary ``{parameter_name: callable}`` of parameters which are\n linked to some other parameter. The dictionary values are callables\n providing the linking relationship.\n\n Alternatively the `~astropy.modeling.Parameter.tied` property of a\n parameter may be used to set the ``tied`` constraint on individual\n parameters.\n\n bounds : dict, optional\n A dictionary ``{parameter_name: value}`` of lower and upper bounds of\n parameters. Keys are parameter names. Values are a list or a tuple\n of length 2 giving the desired range for the parameter.\n\n Alternatively the `~astropy.modeling.Parameter.min` and\n `~astropy.modeling.Parameter.max` or\n ~astropy.modeling.Parameter.bounds` properties of a parameter may be\n used to set bounds on individual parameters.\n\n eqcons : list, optional\n List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``\n in a successfully optimized problem.\n\n ineqcons : list, optional\n List of functions of length n such that ``ieqcons[j](x0, *args) >=\n 0.0`` is a successfully optimized problem.\n\n Examples\n --------\n >>> from astropy.modeling import models\n >>> def tie_center(model):\n ... mean = 50 * model.stddev\n ... return mean\n >>> tied_parameters = {'mean': tie_center}\n\n Specify that ``'mean'`` is a tied parameter in one of two ways:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... tied=tied_parameters)\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.mean.tied\n False\n >>> g1.mean.tied = tie_center\n >>> g1.mean.tied\n <function tie_center at 0x...>\n\n Fixed parameters:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... 
fixed={'stddev': True})\n >>> g1.stddev.fixed\n True\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.stddev.fixed\n False\n >>> g1.stddev.fixed = True\n >>> g1.stddev.fixed\n True\n \"\"\"\n\n parameter_constraints = Parameter.constraints\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that can be set on a model's parameters.\n \"\"\"\n model_constraints = ('eqcons', 'ineqcons')\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that constrain model evaluation.\n \"\"\"\n\n param_names = ()\n \"\"\"\n Names of the parameters that describe models of this type.\n\n The parameters in this tuple are in the same order they should be passed in\n when initializing a model of a specific type. Some types of models, such\n as polynomial models, have a different number of parameters depending on\n some other property of the model, such as the degree.\n\n When defining a custom model class the value of this attribute is\n automatically set by the `~astropy.modeling.Parameter` attributes defined\n in the class body.\n \"\"\"\n\n inputs = ()\n \"\"\"The name(s) of the input variable(s) on which a model is evaluated.\"\"\"\n outputs = ()\n \"\"\"The name(s) of the output(s) of the model.\"\"\"\n\n standard_broadcasting = True\n fittable = False\n linear = True\n\n meta = metadata.MetaData()\n \"\"\"A dict-like object to store optional information.\"\"\"\n\n # By default models either use their own inverse property or have no\n # inverse at all, but users may also assign a custom inverse to a model,\n # optionally; in that case it is of course up to the user to determine\n # whether their inverse is *actually* an inverse to the model they assign\n # it to.\n _inverse = None\n _user_inverse = None\n\n _bounding_box = None\n _user_bounding_box = None\n\n # Default n_models attribute, so that __len__ is still defined even when a\n # model hasn't completed initialization yet\n _n_models = 1\n\n # Enforce strict units on inputs to evaluate. If this is set to True, input\n # values to evaluate have to be in the exact right units specified by\n # input_units. In this case, if the input quantities are convertible to\n # input_units, they are converted.\n input_units_strict = False\n\n # Allow dimensionless input (and corresponding output). If this is True,\n # input values to evaluate will gain the units specified in input_units.\n # Only has an effect if input_units is defined.\n input_units_allow_dimensionless = False\n\n # Default equivalencies to apply to input values. If set, this should be a\n # dictionary where each key is a string that corresponds to one of the model\n # inputs. 
Only has an effect if input_units is defined.\n input_units_equivalencies = None\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__()\n meta = kwargs.pop('meta', None)\n if meta is not None:\n self.meta = meta\n\n self._name = kwargs.pop('name', None)\n\n self._initialize_constraints(kwargs)\n # Remaining keyword args are either parameter values or invalid\n # Parameter values must be passed in as keyword arguments in order to\n # distinguish them\n self._initialize_parameters(args, kwargs)\n\n def __repr__(self):\n return self._format_repr()\n\n def __str__(self):\n return self._format_str()\n\n def __len__(self):\n return self._n_models\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"\n Evaluate this model using the given input(s) and the parameter values\n that were specified when the model was instantiated.\n \"\"\"\n\n inputs, format_info = self.prepare_inputs(*inputs, **kwargs)\n\n # Check whether any of the inputs are quantities\n inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])\n\n parameters = self._param_sets(raw=True, units=True)\n with_bbox = kwargs.pop('with_bounding_box', False)\n fill_value = kwargs.pop('fill_value', np.nan)\n bbox = None\n if with_bbox:\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n bbox = None\n if self.n_inputs > 1 and bbox is not None:\n # bounding_box is in python order - convert it to the order of the inputs\n bbox = bbox[::-1]\n if bbox is None:\n outputs = self.evaluate(*chain(inputs, parameters))\n else:\n if self.n_inputs == 1:\n bbox = [bbox]\n # indices where input is outside the bbox\n # have a value of 1 in ``nan_ind``\n nan_ind = np.zeros(inputs[0].shape, dtype=np.bool)\n for ind, inp in enumerate(inputs):\n # Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.\n axis_ind = np.zeros(inp.shape, dtype=np.bool)\n axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)\n nan_ind[axis_ind] = 1\n # get an array with indices of valid inputs\n valid_ind = np.logical_not(nan_ind).nonzero()\n # inputs holds only inputs within the bbox\n args = []\n for input in inputs:\n if not input.shape:\n # shape is ()\n if nan_ind:\n outputs = [fill_value for a in args]\n else:\n args.append(input)\n else:\n args.append(input[valid_ind])\n valid_result = self.evaluate(*chain(args, parameters))\n if self.n_outputs == 1:\n valid_result = [valid_result]\n # combine the valid results with the ``fill_value`` values\n # outside the bbox\n result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]\n for ind, r in enumerate(valid_result):\n if not result[ind].shape:\n # shape is ()\n result[ind] = r\n else:\n result[ind][valid_ind] = r\n # format output\n if self.n_outputs == 1:\n outputs = np.asarray(result[0])\n else:\n outputs = [np.asarray(r) for r in result]\n else:\n outputs = self.evaluate(*chain(inputs, parameters))\n if self.n_outputs == 1:\n outputs = (outputs,)\n\n outputs = self.prepare_outputs(format_info, *outputs, **kwargs)\n\n # If input values were quantities, we use return_units to cast\n # the return values to the units specified by return_units.\n if self.return_units and inputs_are_quantity:\n # We allow a non-iterable unit only if there is one output\n if self.n_outputs == 1 and not isiterable(self.return_units):\n return_units = {self.outputs[0]: self.return_units}\n else:\n return_units = self.return_units\n\n outputs = tuple([Quantity(out, return_units[out_name], subok=True)\n for out, out_name in zip(outputs, 
self.outputs)])\n\n if self.n_outputs == 1:\n return outputs[0]\n else:\n return outputs\n\n # *** Arithmetic operators for creating compound models ***\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n\n if six.PY2:\n __div__ = _model_oper('/')\n\n # *** Properties ***\n @property\n def name(self):\n \"\"\"User-provided name for this model instance.\"\"\"\n\n return self._name\n\n @name.setter\n def name(self, val):\n \"\"\"Assign a (new) name to this model.\"\"\"\n\n self._name = val\n\n @property\n def n_inputs(self):\n \"\"\"\n The number of inputs to this model.\n\n Equivalent to ``len(model.inputs)``.\n \"\"\"\n\n return len(self.inputs)\n\n @property\n def n_outputs(self):\n \"\"\"\n The number of outputs from this model.\n\n Equivalent to ``len(model.outputs)``.\n \"\"\"\n return len(self.outputs)\n\n @property\n def model_set_axis(self):\n \"\"\"\n The index of the model set axis--that is the axis of a parameter array\n that pertains to which model a parameter value pertains to--as\n specified when the model was initialized.\n\n See the documentation on `Model Sets\n <http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_\n for more details.\n \"\"\"\n\n return self._model_set_axis\n\n @property\n def param_sets(self):\n \"\"\"\n Return parameters as a pset.\n\n This is a list with one item per parameter set, which is an array of\n that parameter's values across all parameter sets, with the last axis\n associated with the parameter set.\n \"\"\"\n\n return self._param_sets()\n\n @property\n def parameters(self):\n \"\"\"\n A flattened array of all parameter values in all parameter sets.\n\n Fittable parameters maintain this list and fitters modify it.\n \"\"\"\n\n # Currently the sequence of a model's parameters must be contiguous\n # within the _parameters array (which may be a view of a larger array,\n # for example when taking a sub-expression of a compound model), so\n # the assumption here is reliable:\n if not self.param_names:\n # Trivial, but not unheard of\n return self._parameters\n\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = self._param_metrics[self.param_names[-1]]['slice'].stop\n\n return self._parameters[start:stop]\n\n @parameters.setter\n def parameters(self, value):\n \"\"\"\n Assigning to this attribute updates the parameters array rather than\n replacing it.\n \"\"\"\n\n if not self.param_names:\n return\n\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = self._param_metrics[self.param_names[-1]]['slice'].stop\n\n try:\n value = np.array(value).flatten()\n self._parameters[start:stop] = value\n except ValueError as e:\n raise InputParameterError(\n \"Input parameter values not compatible with the model \"\n \"parameters array: {0}\".format(e))\n\n @property\n def fixed(self):\n \"\"\"\n A `dict` mapping parameter names to their fixed constraint.\n \"\"\"\n\n return self._constraints['fixed']\n\n @property\n def tied(self):\n \"\"\"\n A `dict` mapping parameter names to their tied constraint.\n \"\"\"\n\n return self._constraints['tied']\n\n @property\n def bounds(self):\n \"\"\"\n A `dict` mapping parameter names to their upper and lower bounds as\n ``(min, max)`` tuples or ``[min, max]`` lists.\n \"\"\"\n\n return self._constraints['bounds']\n\n @property\n def eqcons(self):\n \"\"\"List of parameter equality constraints.\"\"\"\n\n 
return self._constraints['eqcons']\n\n @property\n def ineqcons(self):\n \"\"\"List of parameter inequality constraints.\"\"\"\n\n return self._constraints['ineqcons']\n\n @property\n def inverse(self):\n \"\"\"\n Returns a new `~astropy.modeling.Model` instance which performs the\n inverse transform, if an analytic inverse is defined for this model.\n\n Even on models that don't have an inverse defined, this property can be\n set with a manually-defined inverse, such a pre-computed or\n experimentally determined inverse (often given as a\n `~astropy.modeling.polynomial.PolynomialModel`, but not by\n requirement).\n\n A custom inverse can be deleted with ``del model.inverse``. In this\n case the model's inverse is reset to its default, if a default exists\n (otherwise the default is to raise `NotImplementedError`).\n\n Note to authors of `~astropy.modeling.Model` subclasses: To define an\n inverse for a model simply override this property to return the\n appropriate model representing the inverse. The machinery that will\n make the inverse manually-overridable is added automatically by the\n base class.\n \"\"\"\n\n if self._user_inverse is not None:\n return self._user_inverse\n elif self._inverse is not None:\n return self._inverse()\n\n raise NotImplementedError(\"An analytical inverse transform has not \"\n \"been implemented for this model.\")\n\n @inverse.setter\n def inverse(self, value):\n if not isinstance(value, (Model, type(None))):\n raise ValueError(\n \"The ``inverse`` attribute may be assigned a `Model` \"\n \"instance or `None` (where `None` explicitly forces the \"\n \"model to have no inverse.\")\n\n self._user_inverse = value\n\n @inverse.deleter\n def inverse(self):\n \"\"\"\n Resets the model's inverse to its default (if one exists, otherwise\n the model will have no inverse).\n \"\"\"\n\n del self._user_inverse\n\n @property\n def has_user_inverse(self):\n \"\"\"\n A flag indicating whether or not a custom inverse model has been\n assigned to this model by a user, via assignment to ``model.inverse``.\n \"\"\"\n\n return self._user_inverse is not None\n\n @property\n def bounding_box(self):\n r\"\"\"\n A `tuple` of length `n_inputs` defining the bounding box limits, or\n `None` for no bounding box.\n\n The default limits are given by a ``bounding_box`` property or method\n defined in the class body of a specific model. If not defined then\n this property just raises `NotImplementedError` by default (but may be\n assigned a custom value by a user). ``bounding_box`` can be set\n manually to an array-like object of shape ``(model.n_inputs, 2)``. For\n further usage, see :ref:`bounding-boxes`\n\n The limits are ordered according to the `numpy` indexing\n convention, and are the reverse of the model input order,\n e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:\n\n * for 1D: ``(x_low, x_high)``\n * for 2D: ``((y_low, y_high), (x_low, x_high))``\n * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``\n\n Examples\n --------\n\n Setting the ``bounding_box`` limits for a 1D and 2D model:\n\n >>> from astropy.modeling.models import Gaussian1D, Gaussian2D\n >>> model_1d = Gaussian1D()\n >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)\n >>> model_1d.bounding_box = (-5, 5)\n >>> model_2d.bounding_box = ((-6, 6), (-5, 5))\n\n Setting the bounding_box limits for a user-defined 3D `custom_model`:\n\n >>> from astropy.modeling.models import custom_model\n >>> def const3d(x, y, z, amp=1):\n ... 
return amp\n ...\n >>> Const3D = custom_model(const3d)\n >>> model_3d = Const3D()\n >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))\n\n To reset ``bounding_box`` to its default limits just delete the\n user-defined value--this will reset it back to the default defined\n on the class:\n\n >>> del model_1d.bounding_box\n\n To disable the bounding box entirely (including the default),\n set ``bounding_box`` to `None`:\n\n >>> model_1d.bounding_box = None\n >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"astropy\\modeling\\core.py\", line 980, in bounding_box\n \"No bounding box is defined for this model (note: the \"\n NotImplementedError: No bounding box is defined for this model (note:\n the bounding box was explicitly disabled for this model; use `del\n model.bounding_box` to restore the default bounding box, if one is\n defined for this model).\n \"\"\"\n\n if self._user_bounding_box is not None:\n if self._user_bounding_box is NotImplemented:\n raise NotImplementedError(\n \"No bounding box is defined for this model (note: the \"\n \"bounding box was explicitly disabled for this model; \"\n \"use `del model.bounding_box` to restore the default \"\n \"bounding box, if one is defined for this model).\")\n return self._user_bounding_box\n elif self._bounding_box is None:\n raise NotImplementedError(\n \"No bounding box is defined for this model.\")\n elif isinstance(self._bounding_box, _BoundingBox):\n # This typically implies a hard-coded bounding box. This will\n # probably be rare, but it is an option\n return self._bounding_box\n elif isinstance(self._bounding_box, types.MethodType):\n return self._bounding_box()\n else:\n # The only other allowed possibility is that it's a _BoundingBox\n # subclass, so we call it with its default arguments and return an\n # instance of it (that can be called to recompute the bounding box\n # with any optional parameters)\n # (In other words, in this case self._bounding_box is a *class*)\n bounding_box = self._bounding_box((), _model=self)()\n return self._bounding_box(bounding_box, _model=self)\n\n @bounding_box.setter\n def bounding_box(self, bounding_box):\n \"\"\"\n Assigns the bounding box limits.\n \"\"\"\n\n if bounding_box is None:\n cls = None\n # We use this to explicitly set an unimplemented bounding box (as\n # opposed to no user bounding box defined)\n bounding_box = NotImplemented\n elif (isinstance(self._bounding_box, type) and\n issubclass(self._bounding_box, _BoundingBox)):\n cls = self._bounding_box\n else:\n cls = _BoundingBox\n\n if cls is not None:\n try:\n bounding_box = cls.validate(self, bounding_box)\n except ValueError as exc:\n raise ValueError(exc.args[0])\n\n self._user_bounding_box = bounding_box\n\n @bounding_box.deleter\n def bounding_box(self):\n self._user_bounding_box = None\n\n @property\n def has_user_bounding_box(self):\n \"\"\"\n A flag indicating whether or not a custom bounding_box has been\n assigned to this model by a user, via assignment to\n ``model.bounding_box``.\n \"\"\"\n\n return self._user_bounding_box is not None\n\n # *** Public methods ***\n\n def without_units_for_data(self, **kwargs):\n \"\"\"\n Return an instance of the model for which the parameter values have been\n converted to the right units for the data, then the units have been\n stripped away.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be 
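# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): how the
# ``bounding_box`` property and the ``with_bounding_box`` / ``fill_value``
# options handled in ``Model.__call__`` above are typically used. Variable
# names prefixed with ``_`` are arbitrary.
import numpy as np
from astropy.modeling.models import Gaussian1D

_g = Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.2)
_g.bounding_box = (-1.0, 1.0)            # (x_low, x_high) for a 1D model
_x = np.linspace(-2.0, 2.0, 9)
# Inputs outside the bounding box are replaced by ``fill_value`` (NaN by default).
_y = _g(_x, with_bounding_box=True, fill_value=0.0)
# ---------------------------------------------------------------------------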
able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters should be converted to are not necessarily\n the units of the input data, but are derived from them. Model subclasses\n that want fitting to work in the presence of quantities need to define a\n _parameter_units_for_data_units method that takes the input and output\n units (as two dictionaries) and returns a dictionary giving the target\n units for each parameter.\n \"\"\"\n\n model = self.copy()\n\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n\n parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)\n\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n if parameter.unit is not None:\n parameter.value = parameter.quantity.to(unit).value\n parameter._set_unit(None, force=True)\n\n return model\n\n def with_units_from_data(self, **kwargs):\n \"\"\"\n Return an instance of the model which has units for which the parameter\n values are compatible with the data units specified.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters will gain are not necessarily the units of\n the input data, but are derived from them. 
Model subclasses that want\n fitting to work in the presence of quantities need to define a\n _parameter_units_for_data_units method that takes the input and output\n units (as two dictionaries) and returns a dictionary giving the target\n units for each parameter.\n \"\"\"\n\n model = self.copy()\n\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n\n parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)\n\n # We are adding units to parameters that already have a value, but we\n # don't want to convert the parameter, just add the unit directly, hence\n # the call to _set_unit.\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n parameter._set_unit(unit, force=True)\n\n return model\n\n @property\n def _has_units(self):\n # Returns True if any of the parameters have units\n for param in self.param_names:\n if getattr(self, param).unit is not None:\n return True\n else:\n return False\n\n @property\n def _supports_unit_fitting(self):\n # If the model has a '_parameter_units_for_data_units' method, this\n # indicates that we have enough information to strip the units away\n # and add them back after fitting, when fitting quantities\n return hasattr(self, '_parameter_units_for_data_units')\n\n @abc.abstractmethod\n def evaluate(self, *args, **kwargs):\n \"\"\"Evaluate the model on some input variables.\"\"\"\n\n def sum_of_implicit_terms(self, *args, **kwargs):\n \"\"\"\n Evaluate the sum of any implicit model terms on some input variables.\n This includes any fixed terms used in evaluating a linear model that\n do not have corresponding parameters exposed to the user. The\n prototypical case is `astropy.modeling.functional_models.Shift`, which\n corresponds to a function y = a + bx, where b=1 is intrinsically fixed\n by the type of model, such that sum_of_implicit_terms(x) == x. This\n method is needed by linear fitters to correct the dependent variable\n for the implicit term(s) when solving for the remaining terms\n (ie. a = y - bx).\n \"\"\"\n\n def render(self, out=None, coords=None):\n \"\"\"\n Evaluate a model at fixed positions, respecting the ``bounding_box``.\n\n The key difference relative to evaluating the model directly is that\n this method is limited to a bounding box if the `Model.bounding_box`\n attribute is set.\n\n Parameters\n ----------\n out : `numpy.ndarray`, optional\n An array that the evaluated model will be added to. If this is not\n given (or given as ``None``), a new array will be created.\n coords : array-like, optional\n An array to be used to translate from the model's input coordinates\n to the ``out`` array. It should have the property that\n ``self(coords)`` yields the same shape as ``out``. If ``out`` is\n not specified, ``coords`` will be used to determine the shape of the\n returned array. If this is not provided (or None), the model will be\n evaluated on a grid determined by `Model.bounding_box`.\n\n Returns\n -------\n out : `numpy.ndarray`\n The model added to ``out`` if ``out`` is not ``None``, or else a\n new array from evaluating the model over ``coords``.\n If ``out`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. 
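# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the round
# trip that fitters perform with ``without_units_for_data`` /
# ``with_units_from_data`` when the data carry units. The keyword names match
# the model's ``inputs`` ('x') and ``outputs`` ('y'); variable names are
# arbitrary.
import numpy as np
from astropy import units as u
from astropy.modeling.models import Gaussian1D

_gq = Gaussian1D(amplitude=3 * u.Jy, mean=1.0 * u.um, stddev=0.1 * u.um)
_xq = np.linspace(0.5, 1.5, 11) * u.um
_yq = _gq(_xq)
# Parameters converted to the data units and then stripped (used while fitting)
_plain = _gq.without_units_for_data(x=_xq, y=_yq)
# Units re-attached to the (possibly fitted) parameter values afterwards
_with_units = _plain.with_units_from_data(x=_xq, y=_yq)
# ---------------------------------------------------------------------------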
If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.\n\n Raises\n ------\n ValueError\n If ``coords`` are not given and the the `Model.bounding_box` of this\n model is not set.\n\n Examples\n --------\n :ref:`bounding-boxes`\n \"\"\"\n\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n bbox = None\n\n ndim = self.n_inputs\n\n if (coords is None) and (out is None) and (bbox is None):\n raise ValueError('If no bounding_box is set, '\n 'coords or out must be input.')\n\n # for consistent indexing\n if ndim == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if coords is not None:\n coords = np.asanyarray(coords, dtype=float)\n # Check dimensions match out and model\n assert len(coords) == ndim\n if out is not None:\n if coords[0].shape != out.shape:\n raise ValueError('inconsistent shape of the output.')\n else:\n out = np.zeros(coords[0].shape)\n\n if out is not None:\n out = np.asanyarray(out, dtype=float)\n if out.ndim != ndim:\n raise ValueError('the array and model must have the same '\n 'number of dimensions.')\n\n if bbox is not None:\n # assures position is at center pixel, important when using add_array\n pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n pos, delta = pd\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos)\n for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if out is None:\n out = self(*sub_coords)\n else:\n try:\n out = add_array(out, self(*sub_coords), pos)\n except ValueError:\n raise ValueError(\n 'The `bounding_box` is larger than the input out in '\n 'one or more dimensions. 
Set '\n '`model.bounding_box = None`.')\n else:\n if coords is None:\n im_shape = out.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n coords = coords[::-1]\n\n out += self(*coords)\n\n return out\n\n @property\n def input_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the\n evaluate method expects, and returns a dictionary mapping inputs to\n units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid input units, in which case this property should\n not be overriden since it will return the input units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_input_units'):\n return self._input_units\n elif hasattr(self.evaluate, '__annotations__'):\n annotations = self.evaluate.__annotations__.copy()\n annotations.pop('return', None)\n if annotations:\n # If there are not annotations for all inputs this will error.\n return dict((name, annotations[name]) for name in self.inputs)\n else:\n # None means any unit is accepted\n return None\n\n @input_units.setter\n def input_units(self, input_units):\n self._input_units = input_units\n\n @property\n def return_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the output\n of evaluate should be in, and returns a dictionary mapping outputs to\n units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid output units, in which case this property should not be\n overriden since it will return the return units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_return_units'):\n return self._return_units\n elif hasattr(self.evaluate, '__annotations__'):\n return self.evaluate.__annotations__.get('return', None)\n else:\n # None means any unit is accepted\n return None\n\n @return_units.setter\n def return_units(self, return_units):\n self._return_units = return_units\n\n def prepare_inputs(self, *inputs, **kwargs):\n \"\"\"\n This method is used in `~astropy.modeling.Model.__call__` to ensure\n that all the inputs to the model can be broadcast into compatible\n shapes (if one or both of them are input as arrays), particularly if\n there are more than one parameter sets. 
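# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): using
# ``Model.render`` so that a model is only evaluated inside its
# ``bounding_box``. The bounding box is set explicitly here rather than
# relying on a model default; variable names are arbitrary.
import numpy as np
from astropy.modeling.models import Gaussian2D

_psf = Gaussian2D(amplitude=1.0, x_mean=25, y_mean=30, x_stddev=2.0, y_stddev=2.0)
_psf.bounding_box = ((20, 40), (15, 35))   # ((y_low, y_high), (x_low, x_high))
_image = np.zeros((64, 64))
_psf.render(out=_image)      # adds the model into ``_image`` inside the bbox only
_patch = _psf.render()       # no ``out``/``coords``: returns just the bbox-sized cutout
# ---------------------------------------------------------------------------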
This also makes sure that (if\n applicable) the units of the input will be compatible with the evaluate\n method.\n \"\"\"\n\n # When we instantiate the model class, we make sure that __call__ can\n # take the following two keyword arguments.\n model_set_axis = kwargs.pop('model_set_axis', None)\n equivalencies = kwargs.pop('equivalencies', None)\n\n if model_set_axis is None:\n # By default the model_set_axis for the input is assumed to be the\n # same as that for the parameters the model was defined with\n # TODO: Ensure that negative model_set_axis arguments are respected\n model_set_axis = self.model_set_axis\n\n n_models = len(self)\n\n params = [getattr(self, name) for name in self.param_names]\n inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]\n\n _validate_input_shapes(inputs, self.inputs, n_models,\n model_set_axis, self.standard_broadcasting)\n\n # Check that the units are correct, if applicable\n\n if self.input_units is not None:\n\n # We combine any instance-level input equivalencies with user\n # specified ones at call-time.\n input_units_equivalencies = _combine_equivalency_dict(self.inputs,\n equivalencies,\n self.input_units_equivalencies)\n\n # We now iterate over the different inputs and make sure that their\n # units are consistent with those specified in input_units.\n for i in range(len(inputs)):\n\n input_name = self.inputs[i]\n input_unit = self.input_units.get(input_name, None)\n\n if input_unit is None:\n continue\n\n if isinstance(inputs[i], Quantity):\n\n # We check for consistency of the units with input_units,\n # taking into account any equivalencies\n\n if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):\n\n # If equivalencies have been specified, we need to\n # convert the input to the input units - this is because\n # some equivalencies are non-linear, and we need to be\n # sure that we evaluate the model in its own frame\n # of reference. 
If input_units_strict is set, we also\n # need to convert to the input units.\n if len(input_units_equivalencies) > 0 or self.input_units_strict:\n inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])\n\n else:\n\n # We consider the following two cases separately so as\n # to be able to raise more appropriate/nicer exceptions\n\n if input_unit is dimensionless_unscaled:\n raise UnitsError(\"Units of input '{0}', {1} ({2}), could not be \"\n \"converted to required dimensionless \"\n \"input\".format(self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type))\n else:\n raise UnitsError(\"Units of input '{0}', {1} ({2}), could not be \"\n \"converted to required input units of \"\n \"{3} ({4})\".format(self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type,\n input_unit,\n input_unit.physical_type))\n else:\n\n # If we allow dimensionless input, we add the units to the\n # input values without conversion, otherwise we raise an\n # exception.\n\n if (not self.input_units_allow_dimensionless and\n input_unit is not dimensionless_unscaled and input_unit is not None):\n if np.any(inputs[i] != 0):\n raise UnitsError(\"Units of input '{0}', (dimensionless), could not be \"\n \"converted to required input units of \"\n \"{1} ({2})\".format(self.inputs[i], input_unit,\n input_unit.physical_type))\n\n # The input formatting required for single models versus a multiple\n # model set are different enough that they've been split into separate\n # subroutines\n if n_models == 1:\n return _prepare_inputs_single_model(self, params, inputs,\n **kwargs)\n else:\n return _prepare_inputs_model_set(self, params, inputs, n_models,\n model_set_axis, **kwargs)\n\n def prepare_outputs(self, format_info, *outputs, **kwargs):\n if len(self) == 1:\n return _prepare_outputs_single_model(self, outputs, format_info)\n else:\n return _prepare_outputs_model_set(self, outputs, format_info)\n\n def copy(self):\n \"\"\"\n Return a copy of this model.\n\n Uses a deep copy so that all model attributes, including parameter\n values, are copied as well.\n \"\"\"\n\n return copy.deepcopy(self)\n\n @sharedmethod\n def rename(self, name):\n \"\"\"\n Return a copy of this model with a new name.\n \"\"\"\n new_model = self.copy()\n new_model._name = name\n return new_model\n\n @sharedmethod\n def n_submodels(self):\n \"\"\"\n Return the number of components in a single model, which is\n obviously 1.\n \"\"\"\n return 1\n\n # *** Internal methods ***\n @sharedmethod\n def _from_existing(self, existing, param_names):\n \"\"\"\n Creates a new instance of ``cls`` that shares its underlying parameter\n values with an existing model instance given by ``existing``.\n\n This is used primarily by compound models to return a view of an\n individual component of a compound model. ``param_names`` should be\n the names of the parameters in the *existing* model to use as the\n parameters in this new model. 
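# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the unit
# checking done in ``prepare_inputs`` above, including per-input
# equivalencies supplied at call time as a dict keyed by input name.
from astropy import units as u
from astropy.modeling.models import Gaussian1D

_line = Gaussian1D(amplitude=5 * u.Jy, mean=2.1 * u.um, stddev=0.05 * u.um)
_flux = _line(2.08 * u.um)                 # input unit consistent with ``input_units``
# A frequency works too if a spectral equivalency is provided for input 'x'
_flux2 = _line(143 * u.THz, equivalencies={'x': u.spectral()})
# ---------------------------------------------------------------------------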
Its length should equal the number of\n parameters this model takes, so that it can map parameters on the\n existing model to parameters on this model one-to-one.\n \"\"\"\n\n # Basically this is an alternative __init__\n if isinstance(self, type):\n # self is a class, not an instance\n needs_initialization = True\n dummy_args = (0,) * len(param_names)\n self = self.__new__(self, *dummy_args)\n else:\n needs_initialization = False\n self = self.copy()\n\n aliases = dict(zip(self.param_names, param_names))\n # This is basically an alternative _initialize_constraints\n constraints = {}\n for cons_type in self.parameter_constraints:\n orig = existing._constraints[cons_type]\n constraints[cons_type] = AliasDict(orig, aliases)\n\n self._constraints = constraints\n\n self._n_models = existing._n_models\n self._model_set_axis = existing._model_set_axis\n self._parameters = existing._parameters\n\n self._param_metrics = defaultdict(dict)\n for param_a, param_b in six.iteritems(aliases):\n # Take the param metrics info for the giving parameters in the\n # existing model, and hand them to the appropriate parameters in\n # the new model\n self._param_metrics[param_a] = existing._param_metrics[param_b]\n\n if needs_initialization:\n self.__init__(*dummy_args)\n\n return self\n\n def _initialize_constraints(self, kwargs):\n \"\"\"\n Pop parameter constraint values off the keyword arguments passed to\n `Model.__init__` and store them in private instance attributes.\n \"\"\"\n\n if hasattr(self, '_constraints'):\n # Skip constraint initialization if it has already been handled via\n # an alternate initialization\n return\n\n self._constraints = {}\n # Pop any constraints off the keyword arguments\n for constraint in self.parameter_constraints:\n values = kwargs.pop(constraint, {})\n self._constraints[constraint] = values.copy()\n\n # Update with default parameter constraints\n for param_name in self.param_names:\n param = getattr(self, param_name)\n\n # Parameters don't have all constraint types\n value = getattr(param, constraint)\n if value is not None:\n self._constraints[constraint][param_name] = value\n\n for constraint in self.model_constraints:\n values = kwargs.pop(constraint, [])\n self._constraints[constraint] = values\n\n def _initialize_parameters(self, args, kwargs):\n \"\"\"\n Initialize the _parameters array that stores raw parameter values for\n all parameter sets for use with vectorized fitting algorithms; on\n FittableModels the _param_name attributes actually just reference\n slices of this array.\n \"\"\"\n\n if hasattr(self, '_parameters'):\n # Skip parameter initialization if it has already been handled via\n # an alternate initialization\n return\n\n n_models = kwargs.pop('n_models', None)\n\n if not (n_models is None or\n (isinstance(n_models, (int, np.integer)) and n_models >= 1)):\n raise ValueError(\n \"n_models must be either None (in which case it is \"\n \"determined from the model_set_axis of the parameter initial \"\n \"values) or it must be a positive integer \"\n \"(got {0!r})\".format(n_models))\n\n model_set_axis = kwargs.pop('model_set_axis', None)\n if model_set_axis is None:\n if n_models is not None and n_models > 1:\n # Default to zero\n model_set_axis = 0\n else:\n # Otherwise disable\n model_set_axis = False\n else:\n if not (model_set_axis is False or\n (isinstance(model_set_axis, int) and\n not isinstance(model_set_axis, bool))):\n raise ValueError(\n \"model_set_axis must be either False or an integer \"\n \"specifying the parameter array axis to map to each 
\"\n \"model in a set of models (got {0!r}).\".format(\n model_set_axis))\n\n # Process positional arguments by matching them up with the\n # corresponding parameters in self.param_names--if any also appear as\n # keyword arguments this presents a conflict\n params = {}\n if len(args) > len(self.param_names):\n raise TypeError(\n \"{0}.__init__() takes at most {1} positional arguments ({2} \"\n \"given)\".format(self.__class__.__name__, len(self.param_names),\n len(args)))\n\n self._model_set_axis = model_set_axis\n self._param_metrics = defaultdict(dict)\n\n for idx, arg in enumerate(args):\n if arg is None:\n # A value of None implies using the default value, if exists\n continue\n # We use quantity_asanyarray here instead of np.asanyarray because\n # if any of the arguments are quantities, we need to return a\n # Quantity object not a plain Numpy array.\n params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=np.float)\n\n # At this point the only remaining keyword arguments should be\n # parameter names; any others are in error.\n for param_name in self.param_names:\n if param_name in kwargs:\n if param_name in params:\n raise TypeError(\n \"{0}.__init__() got multiple values for parameter \"\n \"{1!r}\".format(self.__class__.__name__, param_name))\n value = kwargs.pop(param_name)\n if value is None:\n continue\n # We use quantity_asanyarray here instead of np.asanyarray because\n # if any of the arguments are quantities, we need to return a\n # Quantity object not a plain Numpy array.\n params[param_name] = quantity_asanyarray(value, dtype=np.float)\n\n if kwargs:\n # If any keyword arguments were left over at this point they are\n # invalid--the base class should only be passed the parameter\n # values, constraints, and param_dim\n for kwarg in kwargs:\n # Just raise an error on the first unrecognized argument\n raise TypeError(\n '{0}.__init__() got an unrecognized parameter '\n '{1!r}'.format(self.__class__.__name__, kwarg))\n\n # Determine the number of model sets: If the model_set_axis is\n # None then there is just one parameter set; otherwise it is determined\n # by the size of that axis on the first parameter--if the other\n # parameters don't have the right number of axes or the sizes of their\n # model_set_axis don't match an error is raised\n if model_set_axis is not False and n_models != 1 and params:\n max_ndim = 0\n if model_set_axis < 0:\n min_ndim = abs(model_set_axis)\n else:\n min_ndim = model_set_axis + 1\n\n for name, value in six.iteritems(params):\n param_ndim = np.ndim(value)\n if param_ndim < min_ndim:\n raise InputParameterError(\n \"All parameter values must be arrays of dimension \"\n \"at least {0} for model_set_axis={1} (the value \"\n \"given for {2!r} is only {3}-dimensional)\".format(\n min_ndim, model_set_axis, name, param_ndim))\n\n max_ndim = max(max_ndim, param_ndim)\n\n if n_models is None:\n # Use the dimensions of the first parameter to determine\n # the number of model sets\n n_models = value.shape[model_set_axis]\n elif value.shape[model_set_axis] != n_models:\n raise InputParameterError(\n \"Inconsistent dimensions for parameter {0!r} for \"\n \"{1} model sets. 
The length of axis {2} must be the \"\n \"same for all input parameter values\".format(\n name, n_models, model_set_axis))\n\n self._check_param_broadcast(params, max_ndim)\n else:\n if n_models is None:\n n_models = 1\n\n self._check_param_broadcast(params, None)\n\n self._n_models = n_models\n self._initialize_parameter_values(params)\n\n def _initialize_parameter_values(self, params):\n # self._param_metrics should have been initialized in\n # self._initialize_parameters\n param_metrics = self._param_metrics\n total_size = 0\n\n for name in self.param_names:\n unit = None\n param_descr = getattr(self, name)\n\n if params.get(name) is None:\n default = param_descr.default\n\n if default is None:\n # No value was supplied for the parameter and the\n # parameter does not have a default, therefore the model\n # is underspecified\n raise TypeError(\n \"{0}.__init__() requires a value for parameter \"\n \"{1!r}\".format(self.__class__.__name__, name))\n\n value = params[name] = default\n unit = param_descr.unit\n else:\n value = params[name]\n if isinstance(value, Quantity):\n unit = value.unit\n else:\n unit = None\n\n param_size = np.size(value)\n param_shape = np.shape(value)\n\n param_slice = slice(total_size, total_size + param_size)\n\n param_metrics[name]['slice'] = param_slice\n param_metrics[name]['shape'] = param_shape\n\n if unit is None and param_descr.unit is not None:\n raise InputParameterError(\n \"{0}.__init__() requires a Quantity for parameter \"\n \"{1!r}\".format(self.__class__.__name__, name))\n\n param_metrics[name]['orig_unit'] = unit\n param_metrics[name]['raw_unit'] = None\n if param_descr._setter is not None:\n _val = param_descr._setter(value)\n if isinstance(_val, Quantity):\n param_metrics[name]['raw_unit'] = _val.unit\n else:\n param_metrics[name]['raw_unit'] = None\n total_size += param_size\n\n self._param_metrics = param_metrics\n self._parameters = np.empty(total_size, dtype=np.float64)\n\n # Now set the parameter values (this will also fill\n # self._parameters)\n # TODO: This is a bit ugly, but easier to deal with than how this was\n # done previously. There's still lots of opportunity for refactoring\n # though, in particular once we move the _get/set_model_value methods\n # out of Parameter and into Model (renaming them\n # _get/set_parameter_value)\n for name, value in params.items():\n # value here may be a Quantity object.\n param_descr = getattr(self, name)\n unit = param_descr.unit\n value = np.array(value)\n orig_unit = param_metrics[name]['orig_unit']\n if param_descr._setter is not None:\n if unit is not None:\n value = np.asarray(param_descr._setter(value * orig_unit).value)\n else:\n value = param_descr._setter(value)\n self._parameters[param_metrics[name]['slice']] = value.ravel()\n\n # Finally validate all the parameters; we do this last so that\n # validators that depend on one of the other parameters' values will\n # work\n for name in params:\n param_descr = getattr(self, name)\n param_descr.validator(param_descr.value)\n\n def _check_param_broadcast(self, params, max_ndim):\n \"\"\"\n This subroutine checks that all parameter arrays can be broadcast\n against each other, and determines the shapes parameters must have in\n order to broadcast correctly.\n\n If model_set_axis is None this merely checks that the parameters\n broadcast and returns an empty dict if so. 
This mode is only used for\n single model sets.\n \"\"\"\n\n all_shapes = []\n param_names = []\n model_set_axis = self._model_set_axis\n\n for name in self.param_names:\n # Previously this just used iteritems(params), but we loop over all\n # param_names instead just to ensure some determinism in the\n # ordering behavior\n if name not in params:\n continue\n\n value = params[name]\n param_names.append(name)\n # We've already checked that each parameter array is compatible in\n # the model_set_axis dimension, but now we need to check the\n # dimensions excluding that axis\n # Split the array dimensions into the axes before model_set_axis\n # and after model_set_axis\n param_shape = np.shape(value)\n\n param_ndim = len(param_shape)\n if max_ndim is not None and param_ndim < max_ndim:\n # All arrays have the same number of dimensions up to the\n # model_set_axis dimension, but after that they may have a\n # different number of trailing axes. The number of trailing\n # axes must be extended for mutual compatibility. For example\n # if max_ndim = 3 and model_set_axis = 0, an array with the\n # shape (2, 2) must be extended to (2, 1, 2). However, an\n # array with shape (2,) is extended to (2, 1).\n new_axes = (1,) * (max_ndim - param_ndim)\n\n if model_set_axis < 0:\n # Just need to prepend axes to make up the difference\n broadcast_shape = new_axes + param_shape\n else:\n broadcast_shape = (param_shape[:model_set_axis + 1] +\n new_axes +\n param_shape[model_set_axis + 1:])\n self._param_metrics[name]['broadcast_shape'] = broadcast_shape\n all_shapes.append(broadcast_shape)\n else:\n all_shapes.append(param_shape)\n\n # Now check mutual broadcastability of all shapes\n try:\n check_broadcast(*all_shapes)\n except IncompatibleShapeError as exc:\n shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args\n param_a = param_names[shape_a_idx]\n param_b = param_names[shape_b_idx]\n\n raise InputParameterError(\n \"Parameter {0!r} of shape {1!r} cannot be broadcast with \"\n \"parameter {2!r} of shape {3!r}. All parameter arrays \"\n \"must have shapes that are mutually compatible according \"\n \"to the broadcasting rules.\".format(param_a, shape_a,\n param_b, shape_b))\n\n def _param_sets(self, raw=False, units=False):\n \"\"\"\n Implementation of the Model.param_sets property.\n\n This internal implementation has a ``raw`` argument which controls\n whether or not to return the raw parameter values (i.e. the values that\n are actually stored in the ._parameters array, as opposed to the values\n displayed to users. 
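# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the
# model-set machinery set up by ``_initialize_parameters`` and
# ``_check_param_broadcast`` above, seen from the user side.
import numpy as np
from astropy.modeling.models import Gaussian1D

# Two models in one set: each parameter takes one value per model along axis 0
_gset = Gaussian1D(amplitude=[1.0, 2.0], mean=[0.0, 0.5], stddev=[0.1, 0.2],
                   n_models=2)
assert len(_gset) == 2
_xs = np.linspace(-1.0, 1.5, 50)
# model_set_axis=False broadcasts the same inputs to every model in the set
_ys = _gset(_xs, model_set_axis=False)     # result has shape (2, 50)
# ---------------------------------------------------------------------------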
In most cases these are one in the same but there\n are currently a few exceptions.\n\n Note: This is notably an overcomplicated device and may be removed\n entirely in the near future.\n \"\"\"\n\n param_metrics = self._param_metrics\n values = []\n shapes = []\n for name in self.param_names:\n param = getattr(self, name)\n\n if raw:\n value = param._raw_value\n else:\n value = param.value\n\n broadcast_shape = param_metrics[name].get('broadcast_shape')\n if broadcast_shape is not None:\n value = value.reshape(broadcast_shape)\n\n shapes.append(np.shape(value))\n\n if len(self) == 1:\n # Add a single param set axis to the parameter's value (thus\n # converting scalars to shape (1,) array values) for\n # consistency\n value = np.array([value])\n\n if units:\n if raw and self._param_metrics[name]['raw_unit'] is not None:\n unit = self._param_metrics[name]['raw_unit']\n else:\n unit = param.unit\n if unit is not None:\n value = Quantity(value, unit)\n\n values.append(value)\n\n if len(set(shapes)) != 1 or units:\n # If the parameters are not all the same shape, converting to an\n # array is going to produce an object array\n # However the way Numpy creates object arrays is tricky in that it\n # will recurse into array objects in the list and break them up\n # into separate objects. Doing things this way ensures a 1-D\n # object array the elements of which are the individual parameter\n # arrays. There's not much reason to do this over returning a list\n # except for consistency\n psets = np.empty(len(values), dtype=object)\n psets[:] = values\n return psets\n\n # TODO: Returning an array from this method may be entirely pointless\n # for internal use--perhaps only the external param_sets method should\n # return an array (and just for backwards compat--I would prefer to\n # maybe deprecate that method)\n\n return np.array(values)\n\n def _format_repr(self, args=[], kwargs={}, defaults={}):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n # TODO: I think this could be reworked to preset model sets better\n\n parts = [repr(a) for a in args]\n\n parts.extend(\n \"{0}={1}\".format(name,\n param_repr_oneline(getattr(self, name)))\n for name in self.param_names)\n\n if self.name is not None:\n parts.append('name={0!r}'.format(self.name))\n\n for kwarg, value in kwargs.items():\n if kwarg in defaults and defaults[kwarg] != value:\n continue\n parts.append('{0}={1!r}'.format(kwarg, value))\n\n if len(self) > 1:\n parts.append(\"n_models={0}\".format(len(self)))\n\n return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))\n\n def _format_str(self, keywords=[]):\n \"\"\"\n Internal implementation of ``__str__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__str__`` while keeping the same basic\n formatting.\n \"\"\"\n\n default_keywords = [\n ('Model', self.__class__.__name__),\n ('Name', self.name),\n ('Inputs', self.inputs),\n ('Outputs', self.outputs),\n ('Model set size', len(self))\n ]\n\n parts = ['{0}: {1}'.format(keyword, value)\n for keyword, value in default_keywords + keywords\n if value is not None]\n\n parts.append('Parameters:')\n\n if len(self) == 1:\n columns = [[getattr(self, name).value]\n for name in self.param_names]\n else:\n columns = [getattr(self, name).value\n for name in self.param_names]\n\n if columns:\n param_table = Table(columns, 
names=self.param_names)\n # Set units on the columns\n for name in self.param_names:\n param_table[name].unit = getattr(self, name).unit\n parts.append(indent(str(param_table), width=4))\n\n return '\\n'.join(parts)\n\n\nclass FittableModel(Model):\n \"\"\"\n Base class for models that can be fitted using the built-in fitting\n algorithms.\n \"\"\"\n\n linear = False\n # derivative with respect to parameters\n fit_deriv = None\n \"\"\"\n Function (similar to the model's `~Model.evaluate`) to compute the\n derivatives of the model with respect to its parameters, for use by fitting\n algorithms. In other words, this computes the Jacobian matrix with respect\n to the model's parameters.\n \"\"\"\n # Flag that indicates if the model derivatives with respect to parameters\n # are given in columns or rows\n col_fit_deriv = True\n fittable = True\n\n\nclass Fittable1DModel(FittableModel):\n \"\"\"\n Base class for one-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n\n inputs = ('x',)\n outputs = ('y',)\n\n\nclass Fittable2DModel(FittableModel):\n \"\"\"\n Base class for two-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n\n inputs = ('x', 'y')\n outputs = ('z',)\n\n\ndef _make_arithmetic_operator(oper):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n def op(f, g):\n return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])\n\n return op\n\n\ndef _composition_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: g[0](f[0](inputs, params), params),\n f[1], g[2])\n\n\ndef _join_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: (f[0](inputs[:f[1]], params) +\n g[0](inputs[f[1]:], params)),\n f[1] + g[1], f[2] + g[2])\n\n\n# TODO: Support a couple unary operators--at least negation?\nBINARY_OPERATORS = {\n '+': _make_arithmetic_operator(operator.add),\n '-': _make_arithmetic_operator(operator.sub),\n '*': _make_arithmetic_operator(operator.mul),\n '/': _make_arithmetic_operator(operator.truediv),\n '**': _make_arithmetic_operator(operator.pow),\n '|': _composition_operator,\n '&': _join_operator\n}\n\n\n_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]\nOPERATOR_PRECEDENCE = {}\nfor idx, ops in enumerate(_ORDER_OF_OPERATORS):\n for op in ops:\n OPERATOR_PRECEDENCE[op] = idx\ndel idx, op, ops\n\n\nclass _CompoundModelMeta(_ModelMeta):\n _tree = None\n _submodels = None\n _submodel_names = None\n _nextid = 0\n\n _param_names = None\n # _param_map is a mapping of the compound model's generated param names to\n # the parameters of submodels they are associated with. 
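# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the binary
# operators registered above, reached through the Model arithmetic methods
# (``__add__``, ``__or__``, ``__and__``, ...).
from astropy.modeling.models import Gaussian1D, Shift, Scale

_sum = Gaussian1D(1, 0, 0.1) + Gaussian1D(0.5, 1, 0.3)   # '+' : add the outputs
_pipe = Shift(2) | Scale(3)                              # '|' : composition, x -> 3 * (x + 2)
_join = Shift(1) & Shift(-1)                             # '&' : join, 2 inputs -> 2 outputs
_pipe(1.0)         # 9.0
_join(1.0, 1.0)    # (2.0, 0.0)
# ---------------------------------------------------------------------------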
The values in this\n # mapping are (idx, name) tuples were idx is the index of the submodel this\n # parameter is associated with, and name is the same parameter's name on\n # the submodel\n # In principle this will allow compound models to give entirely new names\n # to parameters that don't have to be the same as their original names on\n # the submodels, but right now that isn't taken advantage of\n _param_map = None\n\n _slice_offset = 0\n # When taking slices of a compound model, this keeps track of how offset\n # the first model in the slice is from the first model in the original\n # compound model it was taken from\n\n # This just inverts _param_map, swapping keys with values. This is also\n # useful to have.\n _param_map_inverse = None\n _fittable = None\n\n _evaluate = None\n\n def __getitem__(cls, index):\n index = cls._normalize_index(index)\n\n if isinstance(index, (int, np.integer)):\n return cls._get_submodels()[index]\n else:\n return cls._get_slice(index.start, index.stop)\n\n def __getattr__(cls, attr):\n # Make sure the _tree attribute is set; otherwise we are not looking up\n # an attribute on a concrete compound model class and should just raise\n # the AttributeError\n if cls._tree is not None and attr in cls.param_names:\n cls._init_param_descriptors()\n return getattr(cls, attr)\n\n raise AttributeError(attr)\n\n def __repr__(cls):\n if cls._tree is None:\n # This case is mostly for debugging purposes\n return cls._format_cls_repr()\n\n expression = cls._format_expression()\n components = cls._format_components()\n keywords = [\n ('Expression', expression),\n ('Components', '\\n' + indent(components))\n ]\n\n return cls._format_cls_repr(keywords=keywords)\n\n def __dir__(cls):\n \"\"\"\n Returns a list of attributes defined on a compound model, including\n all of its parameters.\n \"\"\"\n\n try:\n # Annoyingly, this will only work for Python 3.3+\n basedir = super(_CompoundModelMeta, cls).__dir__()\n except AttributeError:\n basedir = list(set((dir(type(cls)) + list(cls.__dict__))))\n\n if cls._tree is not None:\n for name in cls.param_names:\n basedir.append(name)\n\n basedir.sort()\n\n return basedir\n\n def __reduce__(cls):\n rv = super(_CompoundModelMeta, cls).__reduce__()\n\n if isinstance(rv, tuple):\n # Delete _evaluate from the members dict\n with suppress(KeyError):\n del rv[1][2]['_evaluate']\n\n return rv\n\n @property\n def submodel_names(cls):\n if cls._submodel_names is None:\n seen = {}\n names = []\n for idx, submodel in enumerate(cls._get_submodels()):\n name = str(submodel.name)\n if name in seen:\n names.append('{0}_{1}'.format(name, idx))\n if seen[name] >= 0:\n jdx = seen[name]\n names[jdx] = '{0}_{1}'.format(names[jdx], jdx)\n seen[name] = -1\n else:\n names.append(name)\n seen[name] = idx\n cls._submodel_names = tuple(names)\n\n return cls._submodel_names\n\n @property\n def param_names(cls):\n if cls._param_names is None:\n cls._init_param_names()\n\n return cls._param_names\n\n @property\n def fittable(cls):\n if cls._fittable is None:\n cls._fittable = all(m.fittable for m in cls._get_submodels())\n\n return cls._fittable\n\n # TODO: Maybe we could use make_function_with_signature for evaluate, but\n # it's probably not worth it (and I'm not sure what the limit is on number\n # of function arguments/local variables but we could break that limit for\n # complicated compound models...\n def evaluate(cls, *args):\n if cls._evaluate is None:\n func = cls._tree.evaluate(BINARY_OPERATORS,\n getter=cls._model_evaluate_getter)[0]\n # Making this a 
staticmethod isn't strictly necessary for Python 3,\n # but it is necessary on Python 2 since looking up cls._evaluate\n # will return an unbound method otherwise\n cls._evaluate = staticmethod(func)\n inputs = args[:cls.n_inputs]\n params = iter(args[cls.n_inputs:])\n result = cls._evaluate(inputs, params)\n if cls.n_outputs == 1:\n return result[0]\n else:\n return result\n\n # TODO: This supports creating a new compound model from two existing\n # compound models (or normal models) and a single operator. However, it\n # ought also to be possible to create a new model from an *entire*\n # expression, represented as a sequence of operators and their operands (or\n # an exiting ExpressionTree) and build that into a compound model without\n # creating an intermediate _CompoundModel class for every single operator\n # in the expression. This will prove to be a useful optimization in many\n # cases\n @classmethod\n def _from_operator(mcls, operator, left, right, additional_members={}):\n \"\"\"\n Given a Python operator (represented by a string, such as ``'+'``\n or ``'*'``, and two model classes or instances, return a new compound\n model that evaluates the given operator on the outputs of the left and\n right input models.\n\n If either of the input models are a model *class* (i.e. a subclass of\n `~astropy.modeling.Model`) then the returned model is a new subclass of\n `~astropy.modeling.Model` that may be instantiated with any parameter\n values. If both input models are *instances* of a model, a new class\n is still created, but this method returns an *instance* of that class,\n taking the parameter values from the parameters of the input model\n instances.\n\n If given, the ``additional_members`` `dict` may provide additional\n class members that should be added to the generated\n `~astropy.modeling.Model` subclass. Some members that are generated by\n this method should not be provided by ``additional_members``. These\n include ``_tree``, ``inputs``, ``outputs``, ``linear``,\n ``standard_broadcasting``, and ``__module__`. 
This is currently for\n internal use only.\n \"\"\"\n # Note, currently this only supports binary operators, but could be\n # easily extended to support unary operators (namely '-') if/when\n # needed\n children = []\n for child in (left, right):\n if isinstance(child, (_CompoundModelMeta, _CompoundModel)):\n \"\"\"\n Although the original child models were copied we make another\n copy here to ensure that changes in this child compound model\n parameters will not propagate to the reuslt, that is\n cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()\n cm2 = cm1 | Scale()\n cm1.amplitude_0 = 100\n assert(cm2.amplitude_0 == 1)\n \"\"\"\n children.append(copy.deepcopy(child._tree))\n elif isinstance(child, Model):\n children.append(ExpressionTree(child.copy()))\n else:\n children.append(ExpressionTree(child))\n\n tree = ExpressionTree(operator, left=children[0], right=children[1])\n\n name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))\n _CompoundModelMeta._nextid += 1\n\n mod = find_current_module(3)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)\n\n if operator in ('|', '+', '-'):\n linear = left.linear and right.linear\n else:\n # Which is not to say it is *definitely* not linear but it would be\n # trickier to determine\n linear = False\n\n standard_broadcasting = \\\n left.standard_broadcasting and right.standard_broadcasting\n\n # Note: If any other members are added here, make sure to mention them\n # in the docstring of this method.\n members = additional_members\n members.update({\n '_tree': tree,\n '_is_dynamic': True, # See docs for _ModelMeta._is_dynamic\n 'inputs': inputs,\n 'outputs': outputs,\n 'linear': linear,\n 'standard_broadcasting': standard_broadcasting,\n '__module__': str(modname)})\n\n new_cls = mcls(name, (_CompoundModel,), members)\n\n if isinstance(left, Model) and isinstance(right, Model):\n # Both models used in the operator were already instantiated models,\n # not model *classes*. 
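# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module):
# ``_from_operator`` above returns an *instance* when both operands are
# instances, and the generated compound parameter names carry a per-submodel
# index suffix.
from astropy.modeling.models import Gaussian1D, Shift

_cm = Shift(2.0) | Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.5)
_cm.param_names      # ('offset_0', 'amplitude_1', 'mean_1', 'stddev_1')
_cm.mean_1 = 0.25    # parameters are addressed through the suffixed names
# ---------------------------------------------------------------------------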
As such it's not particularly useful to return\n # the class itself, but to instead produce a new instance:\n instance = new_cls()\n\n # Workaround for https://github.com/astropy/astropy/issues/3542\n # TODO: Any effort to restructure the tree-like data structure for\n # compound models should try to obviate this workaround--if\n # intermediate compound models are stored in the tree as well then\n # we can immediately check for custom inverses on sub-models when\n # computing the inverse\n instance._user_inverse = mcls._make_user_inverse(\n operator, left, right)\n\n if left._n_models == right._n_models:\n instance._n_models = left._n_models\n else:\n raise ValueError('Model sets must have the same number of '\n 'components.')\n\n return instance\n\n # Otherwise return the new uninstantiated class itself\n return new_cls\n\n @classmethod\n def _check_inputs_and_outputs(mcls, operator, left, right):\n # TODO: These aren't the full rules for handling inputs and outputs, but\n # this will handle most basic cases correctly\n if operator == '|':\n inputs = left.inputs\n outputs = right.outputs\n\n if left.n_outputs != right.n_inputs:\n raise ModelDefinitionError(\n \"Unsupported operands for |: {0} (n_inputs={1}, \"\n \"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); \"\n \"n_outputs for the left-hand model must match n_inputs \"\n \"for the right-hand model.\".format(\n left.name, left.n_inputs, left.n_outputs, right.name,\n right.n_inputs, right.n_outputs))\n elif operator == '&':\n inputs = combine_labels(left.inputs, right.inputs)\n outputs = combine_labels(left.outputs, right.outputs)\n else:\n # Without loss of generality\n inputs = left.inputs\n outputs = left.outputs\n\n if (left.n_inputs != right.n_inputs or\n left.n_outputs != right.n_outputs):\n raise ModelDefinitionError(\n \"Unsupported operands for {0}: {1} (n_inputs={2}, \"\n \"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); \"\n \"models must have the same n_inputs and the same \"\n \"n_outputs for this operator\".format(\n operator, left.name, left.n_inputs, left.n_outputs,\n right.name, right.n_inputs, right.n_outputs))\n\n return inputs, outputs\n\n @classmethod\n def _make_user_inverse(mcls, operator, left, right):\n \"\"\"\n Generates an inverse `Model` for this `_CompoundModel` when either\n model in the operation has a *custom inverse* that was manually\n assigned by the user.\n\n If either model has a custom inverse, and in particular if another\n `_CompoundModel` has a custom inverse, then none of that model's\n sub-models should be considered at all when computing the inverse.\n So in that case we just compute the inverse ahead of time and set\n it as the new compound model's custom inverse.\n\n Note, this use case only applies when combining model instances,\n since model classes don't currently have a notion of a \"custom\n inverse\" (though it could probably be supported by overriding the\n class's inverse property).\n\n TODO: Consider fixing things so the aforementioned class-based case\n works as well. 
However, for the present purposes this is good enough.\n \"\"\"\n\n if not (operator in ('&', '|') and\n (left._user_inverse or right._user_inverse)):\n # These are the only operators that support an inverse right now\n return None\n\n try:\n left_inv = left.inverse\n right_inv = right.inverse\n except NotImplementedError:\n # If either inverse is undefined then just return False; this\n # means the normal _CompoundModel.inverse routine will fail\n # naturally anyways, since it requires all sub-models to have\n # an inverse defined\n return None\n\n if operator == '&':\n return left_inv & right_inv\n else:\n return right_inv | left_inv\n\n # TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of\n # leaf nodes is something the ExpressionTree class itself could just know\n def _get_submodels(cls):\n # Would make this a lazyproperty but those don't currently work with\n # type objects\n if cls._submodels is not None:\n return cls._submodels\n\n submodels = [c.value for c in cls._tree.traverse_postorder()\n if c.isleaf]\n cls._submodels = submodels\n return submodels\n\n def _init_param_descriptors(cls):\n \"\"\"\n This routine sets up the names for all the parameters on a compound\n model, including figuring out unique names for those parameters and\n also mapping them back to their associated parameters of the underlying\n submodels.\n\n Setting this all up is costly, and only necessary for compound models\n that a user will directly interact with. For example when building an\n expression like::\n\n >>> M = (Model1 + Model2) * Model3 # doctest: +SKIP\n\n the user will generally never interact directly with the temporary\n result of the subexpression ``(Model1 + Model2)``. So there's no need\n to setup all the parameters for that temporary throwaway. 
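# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the inverse
# handling described around ``_make_user_inverse`` above, for the '|'
# (composition) operator.
from astropy.modeling.models import Shift, Scale

_m = Shift(3) | Scale(2)          # x -> 2 * (x + 3)
_m.inverse                        # equivalent to Scale(0.5) | Shift(-3)
_m.inverse(_m(1.0))               # 1.0
# A manually assigned inverse takes precedence over the computed one
_m.inverse = Scale(0.5) | Shift(-3)
assert _m.has_user_inverse
del _m.inverse                    # restore the default behaviour
# ---------------------------------------------------------------------------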
Only once\n the full expression is built and the user initializes or introspects\n ``M`` is it necessary to determine its full parameterization.\n \"\"\"\n\n # Accessing cls.param_names will implicitly call _init_param_names if\n # needed and thus also set up the _param_map; I'm not crazy about that\n # design but it stands for now\n for param_name in cls.param_names:\n submodel_idx, submodel_param = cls._param_map[param_name]\n submodel = cls[submodel_idx]\n\n orig_param = getattr(submodel, submodel_param, None)\n\n if isinstance(submodel, Model):\n # Take the parameter's default from the model's value for that\n # parameter\n default = orig_param.value\n else:\n default = orig_param.default\n\n # Copy constraints\n constraints = dict((key, getattr(orig_param, key))\n for key in Model.parameter_constraints)\n\n # Note: Parameter.copy() returns a new unbound Parameter, never\n # a bound Parameter even if submodel is a Model instance (as\n # opposed to a Model subclass)\n new_param = orig_param.copy(name=param_name, default=default,\n unit=orig_param.unit,\n **constraints)\n\n setattr(cls, param_name, new_param)\n\n def _init_param_names(cls):\n \"\"\"\n This subroutine is solely for setting up the ``param_names`` attribute\n itself.\n\n See ``_init_param_descriptors`` for the full parameter setup.\n \"\"\"\n\n # Currently this skips over Model *instances* in the expression tree;\n # basically these are treated as constants and do not add\n # fittable/tunable parameters to the compound model.\n # TODO: I'm not 100% happy with this design, and maybe we need some\n # interface for distinguishing fittable/settable parameters with\n # *constant* parameters (which would be distinct from parameters with\n # fixed constraints since they're permanently locked in place). But I'm\n # not sure if this is really the best way to treat the issue.\n\n names = []\n param_map = {}\n\n # Start counting the suffix indices to put on parameter names from the\n # slice_offset. 
Usually this will just be zero, but for compound\n # models that were sliced from another compound model this may be > 0\n param_suffix = cls._slice_offset\n\n for idx, model in enumerate(cls._get_submodels()):\n if not model.param_names:\n # Skip models that don't have parameters in the numbering\n # TODO: Reevaluate this if it turns out to be confusing, though\n # parameter-less models are not very common in practice (there\n # are a few projections that don't take parameters)\n continue\n\n for param_name in model.param_names:\n # This is sort of heuristic, but we want to check that\n # model.param_name *actually* returns a Parameter descriptor,\n # and that the model isn't some inconsistent type that happens\n # to have a param_names attribute but does not actually\n # implement settable parameters.\n # In the future we can probably remove this check, but this is\n # here specifically to support the legacy compat\n # _CompositeModel which can be considered a pathological case\n # in the context of the new framework\n # if not isinstance(getattr(model, param_name, None),\n # Parameter):\n # break\n name = '{0}_{1}'.format(param_name, param_suffix + idx)\n names.append(name)\n param_map[name] = (idx, param_name)\n\n cls._param_names = tuple(names)\n cls._param_map = param_map\n cls._param_map_inverse = dict((v, k) for k, v in param_map.items())\n\n def _format_expression(cls):\n # TODO: At some point might be useful to make a public version of this,\n # albeit with more formatting options\n return cls._tree.format_expression(OPERATOR_PRECEDENCE)\n\n def _format_components(cls):\n return '\\n\\n'.join('[{0}]: {1!r}'.format(idx, m)\n for idx, m in enumerate(cls._get_submodels()))\n\n def _normalize_index(cls, index):\n \"\"\"\n Converts an index given to __getitem__ to either an integer, or\n a slice with integer start and stop values.\n\n If the length of the slice is exactly 1 this converts the index to a\n simple integer lookup.\n\n Negative integers are converted to positive integers.\n \"\"\"\n\n def get_index_from_name(name):\n try:\n return cls.submodel_names.index(name)\n except ValueError:\n raise IndexError(\n 'Compound model {0} does not have a component named '\n '{1}'.format(cls.name, name))\n\n def check_for_negative_index(index):\n if index < 0:\n new_index = len(cls.submodel_names) + index\n if new_index < 0:\n # If still < 0 then this is an invalid index\n raise IndexError(\n \"Model index {0} out of range.\".format(index))\n else:\n index = new_index\n\n return index\n\n if isinstance(index, six.string_types):\n return get_index_from_name(index)\n elif isinstance(index, slice):\n if index.step not in (1, None):\n # In principle it could be but I can scarcely imagine a case\n # where it would be useful. 
If someone can think of one then\n # we can enable it.\n raise ValueError(\n \"Step not supported for compound model slicing.\")\n start = index.start if index.start is not None else 0\n stop = (index.stop\n if index.stop is not None else len(cls.submodel_names))\n if isinstance(start, (int, np.integer)):\n start = check_for_negative_index(start)\n if isinstance(stop, (int, np.integer)):\n stop = check_for_negative_index(stop)\n if isinstance(start, six.string_types):\n start = get_index_from_name(start)\n if isinstance(stop, six.string_types):\n stop = get_index_from_name(stop) + 1\n length = stop - start\n\n if length == 1:\n return start\n elif length <= 0:\n raise ValueError(\"Empty slice of a compound model.\")\n\n return slice(start, stop)\n elif isinstance(index, (int, np.integer)):\n if index >= len(cls.submodel_names):\n raise IndexError(\n \"Model index {0} out of range.\".format(index))\n\n return check_for_negative_index(index)\n\n raise TypeError(\n 'Submodels can be indexed either by their integer order or '\n 'their name (got {0!r}).'.format(index))\n\n def _get_slice(cls, start, stop):\n \"\"\"\n Return a new model build from a sub-expression of the expression\n represented by this model.\n\n Right now this is highly inefficient, as it creates a new temporary\n model for each operator that appears in the sub-expression. It would\n be better if this just built a new expression tree, and the new model\n instantiated directly from that tree.\n\n Once tree -> model instantiation is possible this should be fixed to\n use that instead.\n \"\"\"\n\n members = {'_slice_offset': cls._slice_offset + start}\n operators = dict((oper, _model_oper(oper, additional_members=members))\n for oper in BINARY_OPERATORS)\n\n return cls._tree.evaluate(operators, start=start, stop=stop)\n\n @staticmethod\n def _model_evaluate_getter(idx, model):\n n_params = len(model.param_names)\n n_inputs = model.n_inputs\n n_outputs = model.n_outputs\n\n # There is currently an unfortunate inconsistency in some models, which\n # requires them to be instantiated for their evaluate to work. 
I think\n        # that needs to be reconsidered and fixed somehow, but in the meantime\n        # we need to check for that case\n        if (not isinstance(model, Model) and\n                isinstancemethod(model, model.evaluate)):\n            if n_outputs == 1:\n                # Where previously model was a class, now make an instance\n                def f(inputs, params):\n                    param_values = tuple(islice(params, n_params))\n                    return (model(*param_values).evaluate(\n                        *chain(inputs, param_values)),)\n            else:\n                def f(inputs, params):\n                    param_values = tuple(islice(params, n_params))\n                    return model(*param_values).evaluate(\n                        *chain(inputs, param_values))\n        else:\n            evaluate = model.evaluate\n            if n_outputs == 1:\n                f = lambda inputs, params: \\\n                    (evaluate(*chain(inputs, islice(params, n_params))),)\n            else:\n                f = lambda inputs, params: \\\n                    evaluate(*chain(inputs, islice(params, n_params)))\n\n        return (f, n_inputs, n_outputs)\n\n\n@six.add_metaclass(_CompoundModelMeta)\nclass _CompoundModel(Model):\n    fit_deriv = None\n    col_fit_deriv = False\n\n    _submodels = None\n\n    def __str__(self):\n        expression = self._format_expression()\n        components = self._format_components()\n        keywords = [\n            ('Expression', expression),\n            ('Components', '\\n' + indent(components))\n        ]\n        return super(_CompoundModel, self)._format_str(keywords=keywords)\n\n    def __getattr__(self, attr):\n        # This __getattr__ is necessary, because _CompoundModelMeta creates\n        # Parameter descriptors *lazily*--they do not exist in the class\n        # __dict__ until one of them has been accessed.\n        # However, this is at odds with how Python looks up descriptors (see\n        # (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)\n        # which is to look directly in the class __dict__\n        # This workaround allows descriptors to work correctly when they are\n        # not initially found in the class __dict__\n        value = getattr(self.__class__, attr)\n        if hasattr(value, '__get__'):\n            # Object is a descriptor, so we should really return the result of\n            # its __get__\n            value = value.__get__(self, self.__class__)\n        return value\n\n    def __getitem__(self, index):\n        index = self.__class__._normalize_index(index)\n        model = self.__class__[index]\n\n        if isinstance(index, slice):\n            param_names = model.param_names\n        else:\n            param_map = self.__class__._param_map_inverse\n            param_names = tuple(param_map[index, name]\n                                for name in model.param_names)\n\n        return model._from_existing(self, param_names)\n\n    if sys.version_info[:3] < (2, 7, 3):\n        def __reduce__(self):\n            # _CompoundModel classes have a generated evaluate() that is cached\n            # off in the _evaluate attribute. This can't be pickled, and so\n            # should be regenerated after unpickling (alas)\n            if find_current_module(2) is not copy:\n                # The copy module also uses __reduce__, but there's no problem\n                # there.\n                raise RuntimeError(\n                    \"Pickling of compound models is not possible using Python \"\n                    \"versions less than 2.7.3 due to a bug in Python. 
See \"\n \"http://docs.astropy.org/en/v1.0.4/known_issues.html#\"\n \"pickling-error-on-compound-models for more information (\"\n \"tried to pickle {0!r}).\".format(self))\n else:\n return super(_CompoundModel, self).__reduce__()\n\n @property\n def submodel_names(self):\n return self.__class__.submodel_names\n\n @sharedmethod\n def n_submodels(self):\n return len(self.submodel_names)\n\n @property\n def param_names(self):\n return self.__class__.param_names\n\n @property\n def fittable(self):\n return self.__class__.fittable\n\n @sharedmethod\n def evaluate(self, *args):\n return self.__class__.evaluate(*args)\n\n # TODO: The way this works is highly inefficient--the inverse is created by\n # making a new model for each operator in the compound model, which could\n # potentially mean creating a large number of temporary throwaway model\n # classes. This can definitely be optimized in the future by implementing\n # a way to construct a single model class from an existing tree\n @property\n def inverse(self):\n def _not_implemented(oper):\n def _raise(x, y):\n raise NotImplementedError(\n \"The inverse is not currently defined for compound \"\n \"models created using the {0} operator.\".format(oper))\n return _raise\n\n operators = dict((oper, _not_implemented(oper))\n for oper in ('+', '-', '*', '/', '**'))\n operators['&'] = operator.and_\n # Reverse the order of compositions\n operators['|'] = lambda x, y: operator.or_(y, x)\n\n leaf_idx = -1\n\n def getter(idx, model):\n try:\n # By indexing on self[] this will return an instance of the\n # model, with all the appropriate parameters set, which is\n # currently required to return an inverse\n return self[idx].inverse\n except NotImplementedError:\n raise NotImplementedError(\n \"All models in a composite model must have an inverse \"\n \"defined in order for the composite model to have an \"\n \"inverse. {0!r} does not have an inverse.\".format(model))\n\n return self._tree.evaluate(operators, getter=getter)\n\n @sharedmethod\n def _get_submodels(self):\n return self.__class__._get_submodels()\n\n def _parameter_units_for_data_units(self, input_units, output_units):\n units_for_data = {}\n for imodel, model in enumerate(self._submodels):\n units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)\n for param_sub in units_for_data_sub:\n param = self._param_map_inverse[(imodel, param_sub)]\n units_for_data[param] = units_for_data_sub[param_sub]\n return units_for_data\n\n\ndef custom_model(*args, **kwargs):\n \"\"\"\n Create a model from a user defined function. The inputs and parameters of\n the model will be inferred from the arguments of the function.\n\n This can be used either as a function or as a decorator. See below for\n examples of both usages.\n\n .. note::\n\n All model parameters have to be defined as keyword arguments with\n default values in the model function. Use `None` as a default argument\n value if you do not want to have a default value for that parameter.\n\n Parameters\n ----------\n func : function\n Function which defines the model. It should take N positional\n arguments where ``N`` is dimensions of the model (the number of\n independent variable in the model), and any number of keyword arguments\n (the parameters). It must return the value of the model (typically as\n an array, but can also be a scalar for scalar inputs). 
This\n corresponds to the `~astropy.modeling.Model.evaluate` method.\n fit_deriv : function, optional\n Function which defines the Jacobian derivative of the model. I.e., the\n derivative with respect to the *parameters* of the model. It should\n have the same argument signature as ``func``, but should return a\n sequence where each element of the sequence is the derivative\n with respect to the corresponding argument. This corresponds to the\n :meth:`~astropy.modeling.FittableModel.fit_deriv` method.\n\n Examples\n --------\n Define a sinusoidal model function as a custom 1D model::\n\n >>> from astropy.modeling.models import custom_model\n >>> import numpy as np\n >>> def sine_model(x, amplitude=1., frequency=1.):\n ... return amplitude * np.sin(2 * np.pi * frequency * x)\n >>> def sine_deriv(x, amplitude=1., frequency=1.):\n ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)\n >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)\n\n Create an instance of the custom model and evaluate it::\n\n >>> model = SineModel()\n >>> model(0.25)\n 1.0\n\n This model instance can now be used like a usual astropy model.\n\n The next example demonstrates a 2D Moffat function model, and also\n demonstrates the support for docstrings (this example could also include\n a derivative, but it has been omitted for simplicity)::\n\n >>> @custom_model\n ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,\n ... alpha=1.0):\n ... \\\"\\\"\\\"Two dimensional Moffat function.\\\"\\\"\\\"\n ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2\n ... return amplitude * (1 + rr_gg) ** (-alpha)\n ...\n >>> print(Moffat2D.__doc__)\n Two dimensional Moffat function.\n >>> model = Moffat2D()\n >>> model(1, 1) # doctest: +FLOAT_CMP\n 0.3333333333333333\n \"\"\"\n\n fit_deriv = kwargs.get('fit_deriv', None)\n\n if len(args) == 1 and six.callable(args[0]):\n return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)\n elif not args:\n return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)\n else:\n raise TypeError(\n \"{0} takes at most one positional argument (the callable/\"\n \"function to be turned into a model. 
When used as a decorator \"\n \"it should be passed keyword arguments only (if \"\n \"any).\".format(__name__))\n\n\ndef _custom_model_wrapper(func, fit_deriv=None):\n \"\"\"\n Internal implementation `custom_model`.\n\n When `custom_model` is called as a function its arguments are passed to\n this function, and the result of this function is returned.\n\n When `custom_model` is used as a decorator a partial evaluation of this\n function is returned by `custom_model`.\n \"\"\"\n\n if not six.callable(func):\n raise ModelDefinitionError(\n \"func is not callable; it must be a function or other callable \"\n \"object\")\n\n if fit_deriv is not None and not six.callable(fit_deriv):\n raise ModelDefinitionError(\n \"fit_deriv not callable; it must be a function or other \"\n \"callable object\")\n\n model_name = func.__name__\n\n inputs, params = get_inputs_and_params(func)\n\n if (fit_deriv is not None and\n len(six.get_function_defaults(fit_deriv)) != len(params)):\n raise ModelDefinitionError(\"derivative function should accept \"\n \"same number of parameters as func.\")\n\n # TODO: Maybe have a clever scheme for default output name?\n if inputs:\n output_names = (inputs[0].name,)\n else:\n output_names = ('x',)\n\n params = dict((param.name, Parameter(param.name, default=param.default))\n for param in params)\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n members = {\n '__module__': str(modname),\n '__doc__': func.__doc__,\n 'inputs': tuple(x.name for x in inputs),\n 'outputs': output_names,\n 'evaluate': staticmethod(func),\n }\n\n if fit_deriv is not None:\n members['fit_deriv'] = staticmethod(fit_deriv)\n\n members.update(params)\n\n return type(model_name, (FittableModel,), members)\n\n\ndef render_model(model, arr=None, coords=None):\n \"\"\"\n Evaluates a model on an input array. Evaluation is limited to\n a bounding box if the `Model.bounding_box` attribute is set.\n\n Parameters\n ----------\n model : `Model`\n Model to be evaluated.\n arr : `numpy.ndarray`, optional\n Array on which the model is evaluated.\n coords : array-like, optional\n Coordinate arrays mapping to ``arr``, such that\n ``arr[coords] == arr``.\n\n Returns\n -------\n array : `numpy.ndarray`\n The model evaluated on the input ``arr`` or a new array from ``coords``.\n If ``arr`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. 
If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.\n\n Examples\n --------\n :ref:`bounding-boxes`\n \"\"\"\n\n bbox = model.bounding_box\n\n if (coords is None) & (arr is None) & (bbox is None):\n raise ValueError('If no bounding_box is set, coords or arr must be input.')\n\n # for consistent indexing\n if model.n_inputs == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if arr is not None:\n arr = arr.copy()\n # Check dimensions match model\n if arr.ndim != model.n_inputs:\n raise ValueError('number of array dimensions inconsistent with '\n 'number of model inputs.')\n if coords is not None:\n # Check dimensions match arr and model\n coords = np.array(coords)\n if len(coords) != model.n_inputs:\n raise ValueError('coordinate length inconsistent with the number '\n 'of model inputs.')\n if arr is not None:\n if coords[0].shape != arr.shape:\n raise ValueError('coordinate shape inconsistent with the '\n 'array shape.')\n else:\n arr = np.zeros(coords[0].shape)\n\n if bbox is not None:\n # assures position is at center pixel, important when using add_array\n pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if arr is None:\n arr = model(*sub_coords)\n else:\n try:\n arr = add_array(arr, model(*sub_coords), pos)\n except ValueError:\n raise ValueError('The `bounding_box` is larger than the input'\n ' arr in one or more dimensions. Set '\n '`model.bounding_box = None`.')\n else:\n\n if coords is None:\n im_shape = arr.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n arr += model(*coords[::-1])\n\n return arr\n\n\ndef _prepare_inputs_single_model(model, params, inputs, **kwargs):\n broadcasts = []\n\n for idx, _input in enumerate(inputs):\n input_shape = _input.shape\n\n # Ensure that array scalars are always upgrade to 1-D arrays for the\n # sake of consistency with how parameters work. They will be cast back\n # to scalars at the end\n if not input_shape:\n inputs[idx] = _input.reshape((1,))\n\n if not params:\n max_broadcast = input_shape\n else:\n max_broadcast = ()\n\n for param in params:\n try:\n if model.standard_broadcasting:\n broadcast = check_broadcast(input_shape, param.shape)\n else:\n broadcast = input_shape\n except IncompatibleShapeError:\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(model.inputs[idx], input_shape,\n param.name, param.shape))\n\n if len(broadcast) > len(max_broadcast):\n max_broadcast = broadcast\n elif len(broadcast) == len(max_broadcast):\n max_broadcast = max(max_broadcast, broadcast)\n\n broadcasts.append(max_broadcast)\n\n if model.n_outputs > model.n_inputs:\n if len(set(broadcasts)) > 1:\n raise ValueError(\n \"For models with n_outputs > n_inputs, the combination of \"\n \"all inputs and parameters must broadcast to the same shape, \"\n \"which will be used as the shape of all outputs. In this \"\n \"case some of the inputs had different shapes, so it is \"\n \"ambiguous how to format outputs for this model. 
Try using \"\n \"inputs that are all the same size and shape.\")\n else:\n # Extend the broadcasts list to include shapes for all outputs\n extra_outputs = model.n_outputs - model.n_inputs\n if not broadcasts:\n # If there were no inputs then the broadcasts list is empty\n # just add a None since there is no broadcasting of outputs and\n # inputs necessary (see _prepare_outputs_single_model)\n broadcasts.append(None)\n broadcasts.extend([broadcasts[0]] * extra_outputs)\n\n return inputs, (broadcasts,)\n\n\ndef _prepare_outputs_single_model(model, outputs, format_info):\n broadcasts = format_info[0]\n\n outputs = list(outputs)\n\n for idx, output in enumerate(outputs):\n broadcast_shape = broadcasts[idx]\n if broadcast_shape is not None:\n if not broadcast_shape:\n # Shape is (), i.e. a scalar should be returned\n outputs[idx] = output.item()\n else:\n outputs[idx] = output.reshape(broadcast_shape)\n\n return tuple(outputs)\n\n\ndef _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,\n **kwargs):\n reshaped = []\n pivots = []\n\n for idx, _input in enumerate(inputs):\n max_param_shape = ()\n\n if n_models > 1 and model_set_axis is not False:\n # Use the shape of the input *excluding* the model axis\n input_shape = (_input.shape[:model_set_axis] +\n _input.shape[model_set_axis + 1:])\n else:\n input_shape = _input.shape\n\n for param in params:\n try:\n check_broadcast(input_shape, param.shape)\n except IncompatibleShapeError:\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(model.inputs[idx], input_shape,\n param.name, param.shape))\n\n if len(param.shape) > len(max_param_shape):\n max_param_shape = param.shape\n\n # We've now determined that, excluding the model_set_axis, the\n # input can broadcast with all the parameters\n input_ndim = len(input_shape)\n if model_set_axis is False:\n if len(max_param_shape) > input_ndim:\n # Just needs to prepend new axes to the input\n n_new_axes = 1 + len(max_param_shape) - input_ndim\n new_axes = (1,) * n_new_axes\n new_shape = new_axes + _input.shape\n pivot = model.model_set_axis\n else:\n pivot = input_ndim - len(max_param_shape)\n new_shape = (_input.shape[:pivot] + (1,) +\n _input.shape[pivot:])\n new_input = _input.reshape(new_shape)\n else:\n if len(max_param_shape) >= input_ndim:\n n_new_axes = len(max_param_shape) - input_ndim\n pivot = model.model_set_axis\n new_axes = (1,) * n_new_axes\n new_shape = (_input.shape[:pivot + 1] + new_axes +\n _input.shape[pivot + 1:])\n new_input = _input.reshape(new_shape)\n else:\n pivot = _input.ndim - len(max_param_shape) - 1\n new_input = np.rollaxis(_input, model_set_axis,\n pivot + 1)\n\n pivots.append(pivot)\n reshaped.append(new_input)\n\n if model.n_inputs < model.n_outputs:\n pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))\n\n return reshaped, (pivots,)\n\n\ndef _prepare_outputs_model_set(model, outputs, format_info):\n pivots = format_info[0]\n\n outputs = list(outputs)\n\n for idx, output in enumerate(outputs):\n pivot = pivots[idx]\n if pivot < output.ndim and pivot != model.model_set_axis:\n outputs[idx] = np.rollaxis(output, pivot,\n model.model_set_axis)\n\n return tuple(outputs)\n\n\ndef _validate_input_shapes(inputs, argnames, n_models, model_set_axis,\n validate_broadcasting):\n \"\"\"\n Perform basic validation of model inputs--that they are mutually\n broadcastable and that they have the minimum dimensions for the given\n model_set_axis.\n\n If 
validation succeeds, returns the total shape that will result from\n broadcasting the input arrays with each other.\n \"\"\"\n\n check_model_set_axis = n_models > 1 and model_set_axis is not False\n\n if not (validate_broadcasting or check_model_set_axis):\n # Nothing else needed here\n return\n\n all_shapes = []\n\n for idx, _input in enumerate(inputs):\n input_shape = np.shape(_input)\n # Ensure that the input's model_set_axis matches the model's\n # n_models\n if input_shape and check_model_set_axis:\n # Note: Scalar inputs *only* get a pass on this\n if len(input_shape) < model_set_axis + 1:\n raise ValueError(\n \"For model_set_axis={0}, all inputs must be at \"\n \"least {1}-dimensional.\".format(\n model_set_axis, model_set_axis + 1))\n elif input_shape[model_set_axis] != n_models:\n raise ValueError(\n \"Input argument {0!r} does not have the correct \"\n \"dimensions in model_set_axis={1} for a model set with \"\n \"n_models={2}.\".format(argnames[idx], model_set_axis,\n n_models))\n all_shapes.append(input_shape)\n\n if not validate_broadcasting:\n return\n\n try:\n input_broadcast = check_broadcast(*all_shapes)\n except IncompatibleShapeError as exc:\n shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args\n arg_a = argnames[shape_a_idx]\n arg_b = argnames[shape_b_idx]\n\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot \"\n \"be broadcast with input {2!r} of shape {3!r}\".format(\n arg_a, shape_a, arg_b, shape_b))\n\n return input_broadcast\n\n\ncopyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)\ncopyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)\n",
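The shape checks in the helpers above (`_prepare_inputs_single_model`, `_validate_input_shapes`) rest on ordinary NumPy broadcasting between each input array and every parameter array. A minimal NumPy-only sketch of that rule follows; the shapes are illustrative assumptions, not values taken from this source, and the astropy helpers themselves are not used:

import numpy as np

# A (4,)-shaped input and a (3, 1)-shaped parameter are compatible:
# trailing axes match or are 1, so the broadcast result has shape (3, 4).
print(np.broadcast(np.empty((4,)), np.empty((3, 1))).shape)  # -> (3, 4)

# Mismatched trailing axes raise ValueError; this is the situation that the
# "cannot be broadcast with parameter" errors above are reporting.
try:
    np.broadcast(np.empty((4,)), np.empty((3,)))
except ValueError as exc:
    print('incompatible shapes:', exc)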
"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# TEST_UNICODE_LITERALS\n\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ...extern.six.moves import cStringIO as StringIO\nfrom ... import units as u\nfrom ... import time\nfrom ... import coordinates\nfrom ... import table\nfrom ...utils.data_info import data_info_factory, dtype_info_name\nfrom ..table_helpers import simple_table\n\n\ndef test_table_info_attributes(table_types):\n \"\"\"\n Test the info() method of printing a summary of table column attributes\n \"\"\"\n a = np.array([1, 2, 3], dtype='int32')\n b = np.array([1, 2, 3], dtype='float32')\n c = np.array(['a', 'c', 'e'], dtype='|S1')\n t = table_types.Table([a, b, c], names=['a', 'b', 'c'])\n\n # Minimal output for a typical table\n tinfo = t.info(out=None)\n subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else []\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format',\n 'description', 'class', 'n_bad', 'length']\n assert np.all(tinfo['name'] == ['a', 'b', 'c'])\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')])\n if subcls:\n assert np.all(tinfo['class'] == ['MyColumn'] * 3)\n\n # All output fields including a mixin column\n t['d'] = [1, 2, 3] * u.m\n t['d'].description = 'quantity'\n t['a'].format = '%02d'\n t['e'] = time.Time([1, 2, 3], format='mjd')\n t['e'].info.description = 'time'\n t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg')\n t['f'].info.description = 'skycoord'\n\n tinfo = t.info(out=None)\n assert np.all(tinfo['name'] == 'a b c d e f'.split())\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64',\n 'object', 'object'])\n assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg'])\n assert np.all(tinfo['format'] == ['%02d', '', '', '', '', ''])\n assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord'])\n cls = t.ColumnClass.__name__\n assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord'])\n\n # Test that repr(t.info) is same as t.info()\n out = StringIO()\n t.info(out=out)\n assert repr(t.info) == out.getvalue()\n\n\ndef test_table_info_stats(table_types):\n \"\"\"\n Test the info() method of printing a summary of table column statistics\n \"\"\"\n a = np.array([1, 2, 1, 2], dtype='int32')\n b = np.array([1, 2, 1, 2], dtype='float32')\n c = np.array(['a', 'c', 'e', 'f'], dtype='|S1')\n d = time.Time([1, 2, 1, 2], format='mjd')\n t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd'])\n\n # option = 'stats'\n masked = 'masked=True ' if t.masked else ''\n out = StringIO()\n t.info('stats', out=out)\n table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked)\n exp = [table_header_line,\n 'name mean std min max',\n '---- ---- --- --- ---',\n ' a 1.5 0.5 1 2',\n ' b 1.5 0.5 1.0 2.0',\n ' c -- -- -- --',\n ' d -- -- 1.0 2.0']\n assert out.getvalue().splitlines() == exp\n\n # option = ['attributes', 'stats']\n tinfo = t.info(['attributes', 'stats'], out=None)\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',\n 'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length']\n assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--'])\n assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--'])\n assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0'])\n assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0'])\n\n out = StringIO()\n t.info('stats', out=out)\n exp = 
[table_header_line,\n 'name mean std min max',\n '---- ---- --- --- ---',\n ' a 1.5 0.5 1 2',\n ' b 1.5 0.5 1.0 2.0',\n ' c -- -- -- --',\n ' d -- -- 1.0 2.0']\n assert out.getvalue().splitlines() == exp\n\n # option = ['attributes', custom]\n custom = data_info_factory(names=['sum', 'first'],\n funcs=[np.sum, lambda col: col[0]])\n out = StringIO()\n tinfo = t.info(['attributes', custom], out=None)\n assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',\n 'class', 'sum', 'first', 'n_bad', 'length']\n assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd'])\n assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object'])\n assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--'])\n assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0'])\n\n\ndef test_data_info():\n \"\"\"\n Test getting info for just a column.\n \"\"\"\n cols = [table.Column([1.0, 2.0, np.nan], name='name',\n description='description', unit='m/s'),\n table.MaskedColumn([1.0, 2.0, 3.0], name='name',\n description='description', unit='m/s',\n mask=[False, False, True])]\n for c in cols:\n # Test getting the full ordered dict\n cinfo = c.info(out=None)\n assert cinfo == OrderedDict([('name', 'name'),\n ('dtype', 'float64'),\n ('shape', ''),\n ('unit', 'm / s'),\n ('format', ''),\n ('description', 'description'),\n ('class', type(c).__name__),\n ('n_bad', 1),\n ('length', 3)])\n\n # Test the console (string) version which omits trivial values\n out = StringIO()\n c.info(out=out)\n exp = ['name = name',\n 'dtype = float64',\n 'unit = m / s',\n 'description = description',\n 'class = {0}'.format(type(c).__name__),\n 'n_bad = 1',\n 'length = 3']\n assert out.getvalue().splitlines() == exp\n\n # repr(c.info) gives the same as c.info()\n assert repr(c.info) == out.getvalue()\n\n # Test stats info\n cinfo = c.info('stats', out=None)\n assert cinfo == OrderedDict([('name', 'name'),\n ('mean', '1.5'),\n ('std', '0.5'),\n ('min', '1.0'),\n ('max', '2.0'),\n ('n_bad', 1),\n ('length', 3)])\n\n\ndef test_data_info_subclass():\n class Column(table.Column):\n \"\"\"\n Confusingly named Column on purpose, but that is legal.\n \"\"\"\n pass\n for data in ([], [1, 2]):\n c = Column(data, dtype='int64')\n cinfo = c.info(out=None)\n assert cinfo == OrderedDict([('dtype', 'int64'),\n ('shape', ''),\n ('unit', ''),\n ('format', ''),\n ('description', ''),\n ('class', 'Column'),\n ('n_bad', 0),\n ('length', len(data))])\n\n\ndef test_scalar_info():\n \"\"\"\n Make sure info works with scalar values\n \"\"\"\n c = time.Time('2000:001')\n cinfo = c.info(out=None)\n assert cinfo['n_bad'] == 0\n assert 'length' not in cinfo\n\n\ndef test_empty_table():\n t = table.Table()\n out = StringIO()\n t.info(out=out)\n exp = ['<Table length=0>', '<No columns>']\n assert out.getvalue().splitlines() == exp\n\n\ndef test_class_attribute():\n \"\"\"\n Test that class info column is suppressed only for identical non-mixin\n columns.\n \"\"\"\n vals = [[1] * u.m, [2] * u.m]\n\n texp = ['<Table length=1>',\n 'name dtype unit',\n '---- ------- ----',\n 'col0 float64 m',\n 'col1 float64 m']\n\n qexp = ['<QTable length=1>',\n 'name dtype unit class ',\n '---- ------- ---- --------',\n 'col0 float64 m Quantity',\n 'col1 float64 m Quantity']\n\n for table_cls, exp in ((table.Table, texp),\n (table.QTable, qexp)):\n t = table_cls(vals)\n out = StringIO()\n t.info(out=out)\n assert out.getvalue().splitlines() == exp\n\n\ndef test_ignore_warnings():\n t = table.Table([[np.nan, np.nan]])\n with 
warnings.catch_warnings(record=True) as warns:\n t.info('stats', out=None)\n assert len(warns) == 0\n\n\ndef test_no_deprecation_warning():\n # regression test for #5459, where numpy deprecation warnings were\n # emitted unnecessarily.\n t = simple_table()\n with warnings.catch_warnings(record=True) as warns:\n t.info()\n assert len(warns) == 0\n",
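The `data_info_factory` option exercised in `test_table_info_stats` above can also be used on its own to add custom summary columns to `Table.info`. A minimal sketch along the lines of that test; the table values here are illustrative, only the factory call and the info option list mirror the test:

import numpy as np
from astropy.table import Table
from astropy.utils.data_info import data_info_factory

t = Table({'a': [1, 2, 3], 'b': [1.0, 2.0, 3.0]})

# Build an info option reporting each column's sum and first element,
# mirroring the 'custom' option constructed in the test above.
custom = data_info_factory(names=['sum', 'first'],
                           funcs=[np.sum, lambda col: col[0]])
tinfo = t.info(['attributes', custom], out=None)
print(tinfo['name', 'sum', 'first'])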
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\ntry:\n from cStringIO import StringIO\nexcept ImportError: # cStringIO doesn't exist in Python 3\n from io import BytesIO\n StringIO = lambda x: BytesIO(x.encode('ascii'))\n\nimport os\nimport functools\n\nfrom textwrap import dedent\n\nimport pytest\nimport numpy as np\nfrom numpy import ma\n\nfrom ....table import Table, MaskedColumn\nfrom ... import ascii\nfrom ...ascii.core import ParameterError, FastOptionsError\nfrom ...ascii.cparser import CParserError\nfrom ..fastbasic import FastBasic, FastCsv, FastTab, FastCommentedHeader, \\\n FastRdb, FastNoHeader\nfrom .common import assert_equal, assert_almost_equal, assert_true\nfrom ....extern import six\nfrom ....extern.six.moves import range\n\nTRAVIS = os.environ.get('TRAVIS', False)\n\n\ndef assert_table_equal(t1, t2, check_meta=False):\n assert_equal(len(t1), len(t2))\n assert_equal(t1.colnames, t2.colnames)\n if check_meta:\n assert_equal(t1.meta, t2.meta)\n for name in t1.colnames:\n if len(t1) != 0:\n assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)\n if not isinstance(t1[name], MaskedColumn):\n for i, el in enumerate(t1[name]):\n try:\n if not isinstance(el, six.string_types) and np.isnan(el):\n assert_true(not isinstance(t2[name][i], six.string_types) and np.isnan(t2[name][i]))\n elif isinstance(el, six.string_types):\n assert_equal(el, t2[name][i])\n else:\n assert_almost_equal(el, t2[name][i])\n except (TypeError, NotImplementedError):\n pass # ignore for now\n\n\n# Use this counter to create a unique filename for each file created in a test\n# if this function is called more than once in a single test\n_filename_counter = 0\n\n\ndef _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):\n # make sure we have a newline so table can't be misinterpreted as a filename\n global _filename_counter\n\n table += '\\n'\n reader = Reader(**kwargs)\n t1 = reader.read(table)\n t2 = reader.read(StringIO(table))\n t3 = reader.read(table.splitlines())\n t4 = ascii.read(table, format=format, guess=False, **kwargs)\n t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)\n assert_table_equal(t1, t2, check_meta=check_meta)\n assert_table_equal(t2, t3, check_meta=check_meta)\n assert_table_equal(t3, t4, check_meta=check_meta)\n assert_table_equal(t4, t5, check_meta=check_meta)\n\n if parallel:\n if TRAVIS:\n pytest.xfail(\"Multiprocessing can sometimes fail on Travis CI\")\n elif os.name == 'nt':\n pytest.xfail(\"Multiprocessing is currently unsupported on Windows\")\n t6 = ascii.read(table, format=format, guess=False, fast_reader={\n 'parallel': True}, **kwargs)\n assert_table_equal(t1, t6, check_meta=check_meta)\n\n filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter)))\n _filename_counter += 1\n\n with open(filename, 'wb') as f:\n f.write(table.encode('ascii'))\n f.flush()\n\n t7 = ascii.read(filename, format=format, guess=False, **kwargs)\n if parallel:\n t8 = ascii.read(filename, format=format, guess=False, fast_reader={\n 'parallel': True}, **kwargs)\n\n assert_table_equal(t1, t7, check_meta=check_meta)\n if parallel:\n assert_table_equal(t1, t8, check_meta=check_meta)\n return t1\n\n\[email protected](scope='function')\ndef read_basic(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')\n\n\[email protected](scope='function')\ndef read_csv(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')\n\n\[email 
protected](scope='function')\ndef read_tab(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')\n\n\[email protected](scope='function')\ndef read_commented_header(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,\n format='commented_header')\n\n\[email protected](scope='function')\ndef read_rdb(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')\n\n\[email protected](scope='function')\ndef read_no_header(tmpdir, request):\n return functools.partial(_read, tmpdir, Reader=FastNoHeader,\n format='no_header')\n\n\[email protected](\"parallel\", [True, False])\ndef test_simple_data(parallel, read_basic):\n \"\"\"\n Make sure the fast reader works with basic input data.\n \"\"\"\n table = read_basic(\"A B C\\n1 2 3\\n4 5 6\", parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\ndef test_read_types():\n \"\"\"\n Make sure that the read() function takes filenames,\n strings, and lists of strings in addition to file-like objects.\n \"\"\"\n t1 = ascii.read(\"a b c\\n1 2 3\\n4 5 6\", format='fast_basic', guess=False)\n # TODO: also read from file\n t2 = ascii.read(StringIO(\"a b c\\n1 2 3\\n4 5 6\"), format='fast_basic', guess=False)\n t3 = ascii.read([\"a b c\", \"1 2 3\", \"4 5 6\"], format='fast_basic', guess=False)\n assert_table_equal(t1, t2)\n assert_table_equal(t2, t3)\n\n\[email protected](\"parallel\", [True, False])\ndef test_supplied_names(parallel, read_basic):\n \"\"\"\n If passed as a parameter, names should replace any\n column names found in the header.\n \"\"\"\n table = read_basic(\"A B C\\n1 2 3\\n4 5 6\", names=('X', 'Y', 'Z'), parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_no_header(parallel, read_basic, read_no_header):\n \"\"\"\n The header should not be read when header_start=None. 
Unless names is\n passed, the column names should be auto-generated.\n \"\"\"\n # Cannot set header_start=None for basic format\n with pytest.raises(ValueError):\n read_basic(\"A B C\\n1 2 3\\n4 5 6\", header_start=None, data_start=0, parallel=parallel)\n\n t2 = read_no_header(\"A B C\\n1 2 3\\n4 5 6\", parallel=parallel)\n expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3'))\n assert_table_equal(t2, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_no_header_supplied_names(parallel, read_basic, read_no_header):\n \"\"\"\n If header_start=None and names is passed as a parameter, header\n data should not be read and names should be used instead.\n \"\"\"\n table = read_no_header(\"A B C\\n1 2 3\\n4 5 6\",\n names=('X', 'Y', 'Z'), parallel=parallel)\n expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_comment(parallel, read_basic):\n \"\"\"\n Make sure that line comments are ignored by the C reader.\n \"\"\"\n table = read_basic(\"# comment\\nA B C\\n # another comment\\n1 2 3\\n4 5 6\", parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_empty_lines(parallel, read_basic):\n \"\"\"\n Make sure that empty lines are ignored by the C reader.\n \"\"\"\n table = read_basic(\"\\n\\nA B C\\n1 2 3\\n\\n\\n4 5 6\\n\\n\\n\\n\", parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_lstrip_whitespace(parallel, read_basic):\n \"\"\"\n Test to make sure the reader ignores whitespace at the beginning of fields.\n \"\"\"\n text = \"\"\"\n 1, 2, \\t3\n A,\\t\\t B, C\n a, b, c\n\"\"\" + ' \\n'\n\n table = read_basic(text, delimiter=',', parallel=parallel)\n expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_rstrip_whitespace(parallel, read_basic):\n \"\"\"\n Test to make sure the reader ignores whitespace at the end of fields.\n \"\"\"\n text = ' 1 ,2 \\t,3 \\nA\\t,B ,C\\t \\t \\n \\ta ,b , c \\n'\n table = read_basic(text, delimiter=',', parallel=parallel)\n expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_conversion(parallel, read_basic):\n \"\"\"\n The reader should try to convert each column to ints. If this fails, the\n reader should try to convert to floats. Failing this, it should fall back\n to strings.\n \"\"\"\n text = \"\"\"\nA B C D E\n1 a 3 4 5\n2. 
1 9 10 -5.3e4\n4 2 -12 .4 six\n\"\"\"\n table = read_basic(text, parallel=parallel)\n assert_equal(table['A'].dtype.kind, 'f')\n assert table['B'].dtype.kind in ('S', 'U')\n assert_equal(table['C'].dtype.kind, 'i')\n assert_equal(table['D'].dtype.kind, 'f')\n assert table['E'].dtype.kind in ('S', 'U')\n\n\[email protected](\"parallel\", [True, False])\ndef test_delimiter(parallel, read_basic):\n \"\"\"\n Make sure that different delimiters work as expected.\n \"\"\"\n text = \"\"\"\nCOL1 COL2 COL3\n1 A -1\n2 B -2\n\"\"\"\n expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))\n\n for sep in ' ,\\t#;':\n table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_include_names(parallel, read_basic):\n \"\"\"\n If include_names is not None, the parser should read only those columns in include_names.\n \"\"\"\n table = read_basic(\"A B C D\\n1 2 3 4\\n5 6 7 8\", include_names=['A', 'D'], parallel=parallel)\n expected = Table([[1, 5], [4, 8]], names=('A', 'D'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_exclude_names(parallel, read_basic):\n \"\"\"\n If exclude_names is not None, the parser should exclude the columns in exclude_names.\n \"\"\"\n table = read_basic(\"A B C D\\n1 2 3 4\\n5 6 7 8\", exclude_names=['A', 'D'], parallel=parallel)\n expected = Table([[2, 6], [3, 7]], names=('B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_include_exclude_names(parallel, read_basic):\n \"\"\"\n Make sure that include_names is applied before exclude_names if both are specified.\n \"\"\"\n text = \"\"\"\nA B C D E F G H\n1 2 3 4 5 6 7 8\n9 10 11 12 13 14 15 16\n\"\"\"\n table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],\n exclude_names=['B', 'F'], parallel=parallel)\n expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_quoted_fields(parallel, read_basic):\n \"\"\"\n The character quotechar (default '\"') should denote the start of a field which can\n contain the field delimiter and newlines.\n \"\"\"\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = \"\"\"\n\"A B\" C D\n1.5 2.1 -37.1\na b \" c\n d\"\n\"\"\"\n table = read_basic(text, parallel=parallel)\n expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D'))\n assert_table_equal(table, expected)\n table = read_basic(text.replace('\"', \"'\"), quotechar=\"'\", parallel=parallel)\n assert_table_equal(table, expected)\n\n\[email protected](\"key,val\", [\n ('delimiter', ',,'), # multi-char delimiter\n ('comment', '##'), # multi-char comment\n ('data_start', None), # data_start=None\n ('data_start', -1), # data_start negative\n ('quotechar', '##'), # multi-char quote signifier\n ('header_start', -1), # negative header_start\n ('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters\n ('Inputter', ascii.ContinuationLinesInputter), # passing Inputter\n ('header_Splitter', ascii.DefaultSplitter), # passing Splitter\n ('data_Splitter', ascii.DefaultSplitter)])\ndef test_invalid_parameters(key, val):\n \"\"\"\n Make sure the C reader raises an error if passed parameters it can't handle.\n \"\"\"\n with pytest.raises(ParameterError):\n FastBasic(**{key: val}).read('1 
2 3\\n4 5 6')\n with pytest.raises(ParameterError):\n ascii.read('1 2 3\\n4 5 6',\n format='fast_basic', guess=False, **{key: val})\n\n\ndef test_invalid_parameters_other():\n with pytest.raises(TypeError):\n FastBasic(foo=7).read('1 2 3\\n4 5 6') # unexpected argument\n with pytest.raises(FastOptionsError): # don't fall back on the slow reader\n ascii.read('1 2 3\\n4 5 6', format='basic', fast_reader={'foo': 7})\n with pytest.raises(ParameterError):\n # Outputter cannot be specified in constructor\n FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\\n4 5 6')\n\n\ndef test_too_many_cols1():\n \"\"\"\n If a row contains too many columns, the C reader should raise an error.\n \"\"\"\n text = \"\"\"\nA B C\n1 2 3\n4 5 6\n7 8 9 10\n11 12 13\n\"\"\"\n with pytest.raises(CParserError) as e:\n table = FastBasic().read(text)\n assert 'CParserError: an error occurred while parsing table data: too many ' \\\n 'columns found in line 3 of data' in str(e)\n\n\ndef test_too_many_cols2():\n text = \"\"\"\\\naaa,bbb\n1,2,\n3,4,\n\"\"\"\n with pytest.raises(CParserError) as e:\n table = FastCsv().read(text)\n assert 'CParserError: an error occurred while parsing table data: too many ' \\\n 'columns found in line 1 of data' in str(e)\n\n\ndef test_too_many_cols3():\n text = \"\"\"\\\naaa,bbb\n1,2,,\n3,4,\n\"\"\"\n with pytest.raises(CParserError) as e:\n table = FastCsv().read(text)\n assert 'CParserError: an error occurred while parsing table data: too many ' \\\n 'columns found in line 1 of data' in str(e)\n\n\[email protected](\"parallel\", [True, False])\ndef test_not_enough_cols(parallel, read_csv):\n \"\"\"\n If a row does not have enough columns, the FastCsv reader should add empty\n fields while the FastBasic reader should raise an error.\n \"\"\"\n text = \"\"\"\nA,B,C\n1,2,3\n4,5\n6,7,8\n\"\"\"\n table = read_csv(text, parallel=parallel)\n assert table['B'][1] is not ma.masked\n assert table['C'][1] is ma.masked\n\n with pytest.raises(CParserError) as e:\n table = FastBasic(delimiter=',').read(text)\n\n\[email protected](\"parallel\", [True, False])\ndef test_data_end(parallel, read_basic, read_rdb):\n \"\"\"\n The parameter data_end should specify where data reading ends.\n \"\"\"\n text = \"\"\"\nA B C\n1 2 3\n4 5 6\n7 8 9\n10 11 12\n\"\"\"\n table = read_basic(text, data_end=3, parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n # data_end supports negative indexing\n table = read_basic(text, data_end=-2, parallel=parallel)\n assert_table_equal(table, expected)\n\n text = \"\"\"\nA\\tB\\tC\nN\\tN\\tS\n1\\t2\\ta\n3\\t4\\tb\n5\\t6\\tc\n\"\"\"\n # make sure data_end works with RDB\n table = read_rdb(text, data_end=-1, parallel=parallel)\n expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n # positive index\n table = read_rdb(text, data_end=3, parallel=parallel)\n expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n # empty table if data_end is too small\n table = read_rdb(text, data_end=1, parallel=parallel)\n expected = Table([[], [], []], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_inf_nan(parallel, read_basic):\n \"\"\"\n Test that inf and nan-like values are correctly parsed on all platforms.\n\n Regression test for https://github.com/astropy/astropy/pull/3525\n \"\"\"\n\n text = dedent(\"\"\"\\\n A\n nan\n +nan\n -nan\n inf\n 
infinity\n +inf\n +infinity\n -inf\n -infinity\n \"\"\")\n\n expected = Table({'A': [np.nan, np.nan, np.nan,\n np.inf, np.inf, np.inf, np.inf,\n -np.inf, -np.inf]})\n\n table = read_basic(text, parallel=parallel)\n assert table['A'].dtype.kind == 'f'\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_fill_values(parallel, read_basic):\n \"\"\"\n Make sure that the parameter fill_values works as intended. If fill_values\n is not specified, the default behavior should be to convert '' to 0.\n \"\"\"\n text = \"\"\"\nA, B, C\n, 2, nan\na, -999, -3.4\nnan, 5, -9999\n8, nan, 7.6e12\n\"\"\"\n table = read_basic(text, delimiter=',', parallel=parallel)\n # The empty value in row A should become a masked '0'\n assert isinstance(table['A'], MaskedColumn)\n assert table['A'][0] is ma.masked\n # '0' rather than 0 because there is a string in the column\n assert_equal(table['A'].data.data[0], '0')\n assert table['A'][1] is not ma.masked\n\n table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)\n assert isinstance(table['B'], MaskedColumn)\n assert table['A'][0] is not ma.masked # empty value unaffected\n assert table['C'][2] is not ma.masked # -9999 is not an exact match\n assert table['B'][1] is ma.masked\n # Numeric because the rest of the column contains numeric data\n assert_equal(table['B'].data.data[1], 0.0)\n assert table['B'][0] is not ma.masked\n\n table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)\n # None of the columns should be masked\n for name in 'ABC':\n assert not isinstance(table[name], MaskedColumn)\n\n table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'),\n ('nan', '999', 'A', 'C')], parallel=parallel)\n assert np.isnan(table['B'][3]) # nan filling skips column B\n assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan\n assert table['A'][0] is ma.masked\n assert table['A'][2] is ma.masked\n assert_equal(table['A'].data.data[0], '0')\n assert_equal(table['A'].data.data[2], '999')\n assert table['C'][0] is ma.masked\n assert_almost_equal(table['C'].data.data[0], 999.0)\n assert_almost_equal(table['C'][1], -3.4) # column is still of type float\n\n\[email protected](\"parallel\", [True, False])\ndef test_fill_include_exclude_names(parallel, read_csv):\n \"\"\"\n fill_include_names and fill_exclude_names should filter missing/empty value handling\n in the same way that include_names and exclude_names filter output columns.\n \"\"\"\n text = \"\"\"\nA, B, C\n, 1, 2\n3, , 4\n5, 5,\n\"\"\"\n table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)\n assert table['A'][0] is ma.masked\n assert table['B'][1] is ma.masked\n assert table['C'][2] is not ma.masked # C not in fill_include_names\n\n table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)\n assert table['C'][2] is ma.masked\n assert table['A'][0] is not ma.masked\n assert table['B'][1] is not ma.masked # A and B excluded from fill handling\n\n table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel)\n assert table['A'][0] is ma.masked\n assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names\n assert table['C'][2] is not ma.masked\n\n\[email protected](\"parallel\", [True, False])\ndef test_many_rows(parallel, read_basic):\n \"\"\"\n Make sure memory reallocation works okay when the number of rows\n is large (so that each column string is longer than INITIAL_COL_SIZE).\n 
\"\"\"\n text = 'A B C\\n'\n for i in range(500): # create 500 rows\n text += ' '.join([str(i) for i in range(3)])\n text += '\\n'\n\n table = read_basic(text, parallel=parallel)\n expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_many_columns(parallel, read_basic):\n \"\"\"\n Make sure memory reallocation works okay when the number of columns\n is large (so that each header string is longer than INITIAL_HEADER_SIZE).\n \"\"\"\n # create a string with 500 columns and two data rows\n text = ' '.join([str(i) for i in range(500)])\n text += ('\\n' + text + '\\n' + text)\n table = read_basic(text, parallel=parallel)\n expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])\n assert_table_equal(table, expected)\n\n\ndef test_fast_reader():\n \"\"\"\n Make sure that ascii.read() works as expected by default and with\n fast_reader specified.\n \"\"\"\n text = 'a b c\\n1 2 3\\n4 5 6'\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format='fast_basic', guess=False, comment='##')\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': True, 'use_fast_converter': True})\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if os.name == 'nt':\n ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': False,\n 'use_fast_converter': True})\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})\n\n # Use the slow reader instead\n ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format='basic', guess=False, comment='##')\n\n\[email protected](\"parallel\", [True, False])\ndef test_read_tab(parallel, read_tab):\n \"\"\"\n The fast reader for tab-separated values should not strip whitespace, unlike\n the basic reader.\n \"\"\"\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = '1\\t2\\t3\\n a\\t b \\t\\n c\\t\" d\\n e\"\\t '\n table = read_tab(text, parallel=parallel)\n assert_equal(table['1'][0], ' a') # preserve line whitespace\n assert_equal(table['2'][0], ' b ') # preserve field whitespace\n assert table['3'][0] is ma.masked # empty value should be masked\n assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields\n assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace\n\n\[email protected](\"parallel\", [True, False])\ndef test_default_data_start(parallel, read_basic):\n \"\"\"\n If data_start is not explicitly passed to read(), data processing should\n beginning right after the header.\n \"\"\"\n text = 'ignore this line\\na b c\\n1 2 3\\n4 5 6'\n table = read_basic(text, header_start=1, parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_commented_header(parallel, read_commented_header):\n \"\"\"\n The FastCommentedHeader reader should mimic the behavior of the\n CommentedHeader by overriding the default header behavior of FastBasic.\n \"\"\"\n text = \"\"\"\n # A B C\n 1 2 3\n 4 5 6\n\"\"\"\n t1 = read_commented_header(text, 
parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))\n assert_table_equal(t1, expected)\n\n text = '# first commented line\\n # second commented line\\n\\n' + text\n t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)\n assert_table_equal(t2, expected)\n t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed\n assert_table_equal(t3, expected)\n\n text += '7 8 9'\n t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)\n expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))\n assert_table_equal(t4, expected)\n\n with pytest.raises(ParameterError):\n read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative\n\n\[email protected](\"parallel\", [True, False])\ndef test_rdb(parallel, read_rdb):\n \"\"\"\n Make sure the FastRdb reader works as expected.\n \"\"\"\n text = \"\"\"\n\nA\\tB\\tC\n1n\\tS\\t4N\n1\\t 9\\t4.3\n\"\"\"\n table = read_rdb(text, parallel=parallel)\n expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n assert_equal(table['A'].dtype.kind, 'i')\n assert table['B'].dtype.kind in ('S', 'U')\n assert_equal(table['C'].dtype.kind, 'f')\n\n with pytest.raises(ValueError) as e:\n text = 'A\\tB\\tC\\nN\\tS\\tN\\n4\\tb\\ta' # C column contains non-numeric data\n read_rdb(text, parallel=parallel)\n assert 'Column C failed to convert' in str(e)\n\n with pytest.raises(ValueError) as e:\n text = 'A\\tB\\tC\\nN\\tN\\n1\\t2\\t3' # not enough types specified\n read_rdb(text, parallel=parallel)\n assert 'mismatch between number of column names and column types' in str(e)\n\n with pytest.raises(ValueError) as e:\n text = 'A\\tB\\tC\\nN\\tN\\t5\\n1\\t2\\t3' # invalid type for column C\n read_rdb(text, parallel=parallel)\n assert 'type definitions do not all match [num](N|S)' in str(e)\n\n\[email protected](\"parallel\", [True, False])\ndef test_data_start(parallel, read_basic):\n \"\"\"\n Make sure that data parsing begins at data_start (ignoring empty and\n commented lines but not taking quoted values into account).\n \"\"\"\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = \"\"\"\nA B C\n1 2 3\n4 5 6\n\n7 8 \"9\n \\t1\"\n# comment\n10 11 12\n\"\"\"\n table = read_basic(text, data_start=2, parallel=parallel)\n expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n table = read_basic(text, data_start=3, parallel=parallel)\n # ignore empty line\n expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n with pytest.raises(CParserError) as e:\n # tries to begin in the middle of quoted field\n read_basic(text, data_start=4, parallel=parallel)\n assert 'not enough columns found in line 1 of data' in str(e)\n\n table = read_basic(text, data_start=5, parallel=parallel)\n # ignore commented line\n expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n text = \"\"\"\nA B C\n1 2 3\n4 5 6\n\n7 8 9\n# comment\n10 11 12\n\"\"\"\n # make sure reading works as expected in parallel\n table = read_basic(text, data_start=2, parallel=parallel)\n expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_quoted_empty_values(parallel, read_basic):\n 
\"\"\"\n Quoted empty values spanning multiple lines should be treated correctly.\n \"\"\"\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = 'a b c\\n1 2 \" \\n \"'\n table = read_basic(text, parallel=parallel)\n assert table['c'][0] is ma.masked # empty value masked by default\n\n\[email protected](\"parallel\", [True, False])\ndef test_csv_comment_default(parallel, read_csv):\n \"\"\"\n Unless the comment parameter is specified, the CSV reader should\n not treat any lines as comments.\n \"\"\"\n text = 'a,b,c\\n#1,2,3\\n4,5,6'\n table = read_csv(text, parallel=parallel)\n expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_whitespace_before_comment(parallel, read_tab):\n \"\"\"\n Readers that don't strip whitespace from data (Tab, RDB)\n should still treat lines with leading whitespace and then\n the comment char as comment lines.\n \"\"\"\n text = 'a\\tb\\tc\\n # comment line\\n1\\t2\\t3'\n table = read_tab(text, parallel=parallel)\n expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_strip_line_trailing_whitespace(parallel, read_basic):\n \"\"\"\n Readers that strip whitespace from lines should ignore\n trailing whitespace after the last data value of each\n row.\n \"\"\"\n text = 'a b c\\n1 2 \\n3 4 5'\n with pytest.raises(CParserError) as e:\n ascii.read(StringIO(text), format='fast_basic', guess=False)\n assert 'not enough columns found in line 1' in str(e)\n\n text = 'a b c\\n 1 2 3 \\t \\n 4 5 6 '\n table = read_basic(text, parallel=parallel)\n expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_no_data(parallel, read_basic):\n \"\"\"\n As long as column names are supplied, the C reader\n should return an empty table in the absence of data.\n \"\"\"\n table = read_basic('a b c', parallel=parallel)\n expected = Table([[], [], []], names=('a', 'b', 'c'))\n assert_table_equal(table, expected)\n\n table = read_basic('a b c\\n1 2 3', data_start=2, parallel=parallel)\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_line_endings(parallel, read_basic, read_commented_header, read_rdb):\n \"\"\"\n Make sure the fast reader accepts CR and CR+LF\n as newlines.\n \"\"\"\n text = 'a b c\\n1 2 3\\n4 5 6\\n7 8 9\\n'\n expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))\n\n for newline in ('\\r\\n', '\\r'):\n table = read_basic(text.replace('\\n', newline), parallel=parallel)\n assert_table_equal(table, expected)\n\n # Make sure the splitlines() method of FileString\n # works with CR/CR+LF line endings\n text = '#' + text\n for newline in ('\\r\\n', '\\r'):\n table = read_commented_header(text.replace('\\n', newline), parallel=parallel)\n assert_table_equal(table, expected)\n\n expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True)\n expected['a'][0] = np.ma.masked\n expected['c'][0] = np.ma.masked\n text = 'a\\tb\\tc\\nN\\tN\\tN\\n\\t2\\t\\n4\\t5\\t6\\n7\\t8\\t9\\n'\n for newline in ('\\r\\n', '\\r'):\n table = read_rdb(text.replace('\\n', newline), parallel=parallel)\n assert_table_equal(table, expected)\n assert np.all(table == expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_store_comments(parallel, 
read_basic):\n \"\"\"\n Make sure that the output Table produced by the fast\n reader stores any comment lines in its meta attribute.\n \"\"\"\n text = \"\"\"\n# header comment\na b c\n# comment 2\n# comment 3\n1 2 3\n4 5 6\n\"\"\"\n table = read_basic(text, parallel=parallel, check_meta=True)\n assert_equal(table.meta['comments'],\n ['header comment', 'comment 2', 'comment 3'])\n\n\[email protected](\"parallel\", [True, False])\ndef test_empty_quotes(parallel, read_basic):\n \"\"\"\n Make sure the C reader doesn't segfault when the\n input data contains empty quotes. [#3407]\n \"\"\"\n table = read_basic('a b\\n1 \"\"\\n2 \"\"', parallel=parallel)\n expected = Table([[1, 2], [0, 0]], names=('a', 'b'))\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_fast_tab_with_names(parallel, read_tab):\n \"\"\"\n Make sure the C reader doesn't segfault when the header for the\n first column is missing [#3545]\n \"\"\"\n content = \"\"\"#\n\\tdecDeg\\tRate_pn_offAxis\\tRate_mos2_offAxis\\tObsID\\tSourceID\\tRADeg\\tversion\\tCounts_pn\\tRate_pn\\trun\\tRate_mos1\\tRate_mos2\\tInserted_pn\\tInserted_mos2\\tbeta\\tRate_mos1_offAxis\\trcArcsec\\tname\\tInserted\\tCounts_mos1\\tInserted_mos1\\tCounts_mos2\\ty\\tx\\tCounts\\toffAxis\\tRot\n-3.007559\\t0.0000\\t0.0010\\t0013140201\\t0\\t213.462574\\t0\\t2\\t0.0002\\t0\\t0.0001\\t0.0001\\t0\\t1\\t0.66\\t0.0217\\t3.0\\tfakeXMMXCS J1413.8-0300\\t3\\t1\\t2\\t1\\t398.000\\t127.000\\t5\\t13.9\\t72.3\\t\"\"\"\n head = ['A{0}'.format(i) for i in range(28)]\n table = read_tab(content, data_start=1,\n parallel=parallel, names=head)\n\n\[email protected](not os.getenv('TEST_READ_HUGE_FILE'),\n reason='Environment variable TEST_READ_HUGE_FILE must be '\n 'defined to run this test')\ndef test_read_big_table(tmpdir):\n \"\"\"Test reading of a huge file.\n\n This test generates a huge CSV file (~2.3Gb) before reading it (see\n https://github.com/astropy/astropy/pull/5319). The test is run only if the\n environment variable ``TEST_READ_HUGE_FILE`` is defined. 
Note that running\n the test requires quite a lot of memory (~18Gb when reading the file) !!\n\n \"\"\"\n NB_ROWS = 250000\n NB_COLS = 500\n filename = str(tmpdir.join(\"big_table.csv\"))\n\n print(\"Creating a {} rows table ({} columns).\".format(NB_ROWS, NB_COLS))\n data = np.random.random(NB_ROWS)\n t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)])\n data = None\n\n print(\"Saving the table to {}\".format(filename))\n t.write(filename, format='ascii.csv', overwrite=True)\n t = None\n\n print(\"Counting the number of lines in the csv, it should be {}\"\n \" + 1 (header).\".format(NB_ROWS))\n assert sum(1 for line in open(filename)) == NB_ROWS + 1\n\n print(\"Reading the file with astropy.\")\n t = Table.read(filename, format='ascii.csv', fast_reader=True)\n assert len(t) == NB_ROWS\n\n\[email protected](not os.getenv('TEST_READ_HUGE_FILE'),\n reason='Environment variable TEST_READ_HUGE_FILE must be '\n 'defined to run this test')\ndef test_read_big_table2(tmpdir):\n \"\"\"Test reading of a file with a huge column.\n \"\"\"\n # (2**32 // 2) : max value for int\n # // 10 : we use a value for rows that have 10 chars (1e9)\n # + 5 : add a few lines so the length cannot be stored by an int\n NB_ROWS = (2**32 // 2) // 10 + 5\n filename = str(tmpdir.join(\"big_table.csv\"))\n\n print(\"Creating a {} rows table.\".format(NB_ROWS))\n data = np.full(2**32 // 2 // 10 + 5, int(1e9), dtype=np.int32)\n t = Table(data=[data], names=['a'], copy=False)\n\n print(\"Saving the table to {}\".format(filename))\n t.write(filename, format='ascii.csv', overwrite=True)\n t = None\n\n print(\"Counting the number of lines in the csv, it should be {}\"\n \" + 1 (header).\".format(NB_ROWS))\n assert sum(1 for line in open(filename)) == NB_ROWS + 1\n\n print(\"Reading the file with astropy.\")\n t = Table.read(filename, format='ascii.csv', fast_reader=True)\n assert len(t) == NB_ROWS\n\n\n# fast_reader configurations: False| 'use_fast_converter'=False|True\[email protected]('reader', [0, 1, 2])\n# catch Windows environment since we cannot use _read() with custom fast_reader\[email protected](\"parallel\", [False, True])\ndef test_data_out_of_range(parallel, reader):\n \"\"\"\n Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)\n shall be returned as 0 and +-inf respectively by the C parser, just like\n the Python parser.\n Test fast converter only to nominal accuracy.\n \"\"\"\n if os.name == 'nt':\n pytest.xfail(reason=\"Multiprocessing is currently unsupported on Windows\")\n # Python reader and strtod() are expected to return precise results\n rtol = 1.e-30\n if reader > 1:\n rtol = 1.e-15\n # passing fast_reader dict with parametrize does not work!\n if reader > 0:\n fast_reader = {'parallel': parallel, 'use_fast_converter': reader > 1}\n else:\n fast_reader = False\n if parallel:\n if reader < 1:\n pytest.skip(\"Multiprocessing only available in fast reader\")\n elif TRAVIS:\n pytest.xfail(\"Multiprocessing can sometimes fail on Travis CI\")\n\n fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']\n values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])\n t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False,\n fast_reader=fast_reader)\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)\n\n # test some additional corner cases\n fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305', '0.4e-324',\n '2500e-327', ' 
0.0000000000000000000001024E+330']\n values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])\n t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False,\n fast_reader=fast_reader)\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)\n\n # test corner cases again with non-standard exponent_style (auto-detection)\n if reader < 2:\n pytest.skip(\"Fortran exponent style only available in fast converter\")\n fast_reader.update({'exponent_style': 'A'})\n fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305', '0.2e-323',\n '2500-327', ' 0.0000000000000000000001024Q+330']\n t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False,\n fast_reader=fast_reader)\n read_values = np.array([col[0] for col in t.itercols()])\n assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)\n\n\n# catch Windows environment since we cannot use _read() with custom fast_reader\[email protected](\"parallel\", [True, False])\ndef test_int_out_of_range(parallel):\n \"\"\"\n Integer numbers outside int range shall be returned as string columns\n consistent with the standard (Python) parser (no 'upcasting' to float).\n \"\"\"\n if os.name == 'nt':\n pytest.xfail(reason=\"Multiprocessing is currently unsupported on Windows\")\n\n imin = np.iinfo(np.int).min+1\n imax = np.iinfo(np.int).max-1\n huge = '{:d}'.format(imax+2)\n\n text = 'P M S\\n {:d} {:d} {:s}'.format(imax, imin, huge)\n expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))\n table = ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': parallel})\n assert_table_equal(table, expected)\n\n # check with leading zeroes to make sure strtol does not read them as octal\n text = 'P M S\\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge)\n expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S'))\n table = ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': parallel})\n assert_table_equal(table, expected)\n\n # mixed columns should be returned as float, but if the out-of-range integer\n # shows up first, it will produce a string column - with both readers\n pytest.xfail(\"Integer fallback depends on order of rows\")\n text = 'A B\\n 12.3 {0:d}9\\n {0:d}9 45.6e7'.format(imax)\n expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]],\n names=('A', 'B'))\n\n table = ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': parallel})\n assert_table_equal(table, expected)\n table = ascii.read(text, format='basic', guess=False, fast_reader=False)\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_fortran_reader(parallel):\n \"\"\"\n Make sure that ascii.read() can read Fortran-style exponential notation\n using the fast_reader.\n \"\"\"\n if os.name == 'nt':\n pytest.xfail(reason=\"Multiprocessing is currently unsupported on Windows\")\n\n text = 'A B C\\n100.01{:s}+99 2.0 3\\n 4.2{:s}-1 5.0{:s}-1 0.6{:s}4'\n expected = Table([[1.0001e101, 0.42], [2, 0.5], [3.0, 6000]],\n names=('A', 'B', 'C'))\n\n expstyles = {'e': 4*('E'), 'D': ('D', 'd', 'd', 'D'), 'Q': 2*('q', 'Q'),\n 'fortran': ('D', 'E', 'Q', 'd')}\n\n # C strtod (not-fast converter) can't handle Fortran exp\n with pytest.raises(FastOptionsError) as e:\n ascii.read(text.format(*(4*('D'))), format='basic', guess=False,\n fast_reader={'use_fast_converter': False,\n 'parallel': parallel, 'exponent_style': 'D'})\n assert 'fast_reader: 
exponent_style requires use_fast_converter' in str(e)\n\n # enable multiprocessing and the fast converter\n # iterate over all style-exponent combinations\n for s, c in expstyles.items():\n table = ascii.read(text.format(*c), format='basic', guess=False,\n fast_reader={'parallel': parallel,\n 'exponent_style': s})\n assert_table_equal(table, expected)\n\n # mixes and triple-exponents without any character using autodetect option\n text = 'A B C\\n1.0001+101 2.0E0 3\\n.42d0 0.5 6.+003'\n table = ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': parallel, 'exponent_style': 'fortran'})\n assert_table_equal(table, expected)\n\n # additional corner-case checks\n text = 'A B C\\n1.0001+101 2.0+000 3\\n0.42+000 0.5 6000.-000'\n table = ascii.read(text, format='basic', guess=False,\n fast_reader={'parallel': parallel, 'exponent_style': 'fortran'})\n assert_table_equal(table, expected)\n\n\[email protected](\"parallel\", [True, False])\ndef test_fortran_invalid_exp(parallel):\n \"\"\"\n Test Fortran-style exponential notation in the fast_reader with invalid\n exponent-like patterns (no triple-digits) to make sure they are returned\n as strings instead, as with the standard C parser.\n \"\"\"\n if os.name == 'nt':\n pytest.xfail(reason=\"Multiprocessing is currently unsupported on Windows\")\n if parallel and TRAVIS:\n pytest.xfail(\"Multiprocessing can sometimes fail on Travis CI\")\n\n fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',\n '2', '4.56e-2.3', '8000', '4.2-122']\n values = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,\n 2, '4.56e-2.3', 8000, 4.2e-122]\n\n t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False,\n fast_reader={'parallel': parallel, 'exponent_style': 'A'})\n read_values = [col[0] for col in t.itercols()]\n assert read_values == values\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module contains a general framework for defining graphs of transformations\nbetween coordinates, suitable for either spatial coordinates or more generalized\ncoordinate systems.\n\nThe fundamental idea is that each class is a node in the transformation graph,\nand transitions from one node to another are defined as functions (or methods)\nwrapped in transformation objects.\n\nThis module also includes more specific transformation classes for\ncelestial/spatial coordinate frames, generally focused around matrix-style\ntransformations that are typically how the algorithms are defined.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport heapq\nimport inspect\nimport subprocess\nfrom warnings import warn\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import defaultdict, OrderedDict\n\nimport numpy as np\n\nfrom .. import units as u\nfrom ..utils.compat import suppress\nfrom ..utils.compat.funcsigs import signature\nfrom ..utils.exceptions import AstropyWarning\nfrom ..extern import six\nfrom ..extern.six.moves import range\n\nfrom .representation import REPRESENTATION_CLASSES\n\n__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',\n 'BaseAffineTransform', 'AffineTransform',\n 'StaticMatrixTransform', 'DynamicMatrixTransform',\n 'FunctionTransformWithFiniteDifference', 'CompositeTransform']\n\n\nclass TransformGraph(object):\n \"\"\"\n A graph representing the paths between coordinate frames.\n \"\"\"\n\n def __init__(self):\n self._graph = defaultdict(dict)\n self.invalidate_cache() # generates cache entries\n\n @property\n def _cached_names(self):\n if self._cached_names_dct is None:\n self._cached_names_dct = dct = {}\n for c in self.frame_set:\n nm = getattr(c, 'name', None)\n if nm is not None:\n dct[nm] = c\n\n return self._cached_names_dct\n\n @property\n def frame_set(self):\n \"\"\"\n A `set` of all the frame classes present in this `TransformGraph`.\n \"\"\"\n if self._cached_frame_set is None:\n self._cached_frame_set = frm_set = set()\n for a in self._graph:\n frm_set.add(a)\n for b in self._graph[a]:\n frm_set.add(b)\n\n return self._cached_frame_set.copy()\n\n @property\n def frame_attributes(self):\n \"\"\"\n A `dict` of all the attributes of all frame classes in this `TransformGraph`.\n \"\"\"\n if self._cached_frame_attributes is None:\n result = {}\n for frame_cls in self.frame_set:\n result.update(frame_cls.frame_attributes)\n self._cached_frame_attributes = result\n\n return self._cached_frame_attributes\n\n def invalidate_cache(self):\n \"\"\"\n Invalidates the cache that stores optimizations for traversing the\n transform graph. This is called automatically when transforms\n are added or removed, but will need to be called manually if\n weights on transforms are modified inplace.\n \"\"\"\n self._cached_names_dct = None\n self._cached_frame_set = None\n self._cached_frame_attributes = None\n self._shortestpaths = {}\n self._composite_cache = {}\n\n def add_transform(self, fromsys, tosys, transform):\n \"\"\"\n Add a new coordinate transformation to the graph.\n\n Parameters\n ----------\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n transform : CoordinateTransform or similar callable\n The transformation object. 
Typically a `CoordinateTransform` object,\n although it may be some other callable that is called with the same\n signature.\n\n Raises\n ------\n TypeError\n If ``fromsys`` or ``tosys`` are not classes or ``transform`` is\n not callable.\n \"\"\"\n\n if not inspect.isclass(fromsys):\n raise TypeError('fromsys must be a class')\n if not inspect.isclass(tosys):\n raise TypeError('tosys must be a class')\n if not six.callable(transform):\n raise TypeError('transform must be callable')\n\n self._graph[fromsys][tosys] = transform\n self.invalidate_cache()\n\n def remove_transform(self, fromsys, tosys, transform):\n \"\"\"\n Removes a coordinate transform from the graph.\n\n Parameters\n ----------\n fromsys : class or `None`\n The coordinate frame *class* to start from. If `None`,\n ``transform`` will be searched for and removed (``tosys`` must\n also be `None`).\n tosys : class or `None`\n The coordinate frame *class* to transform into. If `None`,\n ``transform`` will be searched for and removed (``fromsys`` must\n also be `None`).\n transform : callable or `None`\n The transformation object to be removed or `None`. If `None`\n and ``tosys`` and ``fromsys`` are supplied, there will be no\n check to ensure the correct object is removed.\n \"\"\"\n if fromsys is None or tosys is None:\n if not (tosys is None and fromsys is None):\n raise ValueError('fromsys and tosys must both be None if either are')\n if transform is None:\n raise ValueError('cannot give all Nones to remove_transform')\n\n # search for the requested transform by brute force and remove it\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n if b is transform:\n del agraph[b]\n break\n else:\n raise ValueError('Could not find transform {0} in the '\n 'graph'.format(transform))\n\n else:\n if transform is None:\n self._graph[fromsys].pop(tosys, None)\n else:\n curr = self._graph[fromsys].get(tosys, None)\n if curr is transform:\n self._graph[fromsys].pop(tosys)\n else:\n raise ValueError('Current transform from {0} to {1} is not '\n '{2}'.format(fromsys, tosys, transform))\n self.invalidate_cache()\n\n def find_shortest_path(self, fromsys, tosys):\n \"\"\"\n Computes the shortest distance along the transform graph from\n one system to another.\n\n Parameters\n ----------\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n\n Returns\n -------\n path : list of classes or `None`\n The path from ``fromsys`` to ``tosys`` as an in-order sequence\n of classes. This list includes *both* ``fromsys`` and\n ``tosys``. Is `None` if there is no possible path.\n distance : number\n The total distance/priority from ``fromsys`` to ``tosys``. If\n priorities are not set this is the number of transforms\n needed. 
Is ``inf`` if there is no possible path.\n \"\"\"\n\n inf = float('inf')\n\n # special-case the 0 or 1-path\n if tosys is fromsys:\n if tosys not in self._graph[fromsys]:\n # Means there's no transform necessary to go from it to itself.\n return [tosys], 0\n if tosys in self._graph[fromsys]:\n # this will also catch the case where tosys is fromsys, but has\n # a defined transform.\n t = self._graph[fromsys][tosys]\n return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)\n\n # otherwise, need to construct the path:\n\n if fromsys in self._shortestpaths:\n # already have a cached result\n fpaths = self._shortestpaths[fromsys]\n if tosys in fpaths:\n return fpaths[tosys]\n else:\n return None, inf\n\n # use Dijkstra's algorithm to find shortest path in all other cases\n\n nodes = []\n # first make the list of nodes\n for a in self._graph:\n if a not in nodes:\n nodes.append(a)\n for b in self._graph[a]:\n if b not in nodes:\n nodes.append(b)\n\n if fromsys not in nodes or tosys not in nodes:\n # fromsys or tosys are isolated or not registered, so there's\n # certainly no way to get from one to the other\n return None, inf\n\n edgeweights = {}\n # construct another graph that is a dict of dicts of priorities\n # (used as edge weights in Dijkstra's algorithm)\n for a in self._graph:\n edgeweights[a] = aew = {}\n agraph = self._graph[a]\n for b in agraph:\n aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)\n\n # entries in q are [distance, count, nodeobj, pathlist]\n # count is needed because in py 3.x, tie-breaking fails on the nodes.\n # this way, insertion order is preserved if the weights are the same\n q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]\n q.insert(0, [0, -1, fromsys, []])\n\n # this dict will store the distance to node from ``fromsys`` and the path\n result = {}\n\n # definitely starts as a valid heap because of the insert line; from the\n # node to itself is always the shortest distance\n while len(q) > 0:\n d, orderi, n, path = heapq.heappop(q)\n\n if d == inf:\n # everything left is unreachable from fromsys, just copy them to\n # the results and jump out of the loop\n result[n] = (None, d)\n for d, orderi, n, path in q:\n result[n] = (None, d)\n break\n else:\n result[n] = (path, d)\n path.append(n)\n if n not in edgeweights:\n # this is a system that can be transformed to, but not from.\n continue\n for n2 in edgeweights[n]:\n if n2 not in result: # already visited\n # find where n2 is in the heap\n for i in range(len(q)):\n if q[i][2] == n2:\n break\n else:\n raise ValueError('n2 not in heap - this should be impossible!')\n\n newd = d + edgeweights[n][n2]\n if newd < q[i][0]:\n q[i][0] = newd\n q[i][3] = list(path)\n heapq.heapify(q)\n\n # cache for later use\n self._shortestpaths[fromsys] = result\n return result[tosys]\n\n def get_transform(self, fromsys, tosys):\n \"\"\"\n Generates and returns the `CompositeTransform` for a transformation\n between two coordinate systems.\n\n Parameters\n ----------\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n\n Returns\n -------\n trans : `CompositeTransform` or `None`\n If there is a path from ``fromsys`` to ``tosys``, this is a\n transform object for that path. 
If no path could be found, this is\n `None`.\n\n Notes\n -----\n This function always returns a `CompositeTransform`, because\n `CompositeTransform` is slightly more adaptable in the way it can be\n called than other transform classes. Specifically, it takes care of\n intermediate steps of transformations in a way that is consistent with\n 1-hop transformations.\n\n \"\"\"\n if not inspect.isclass(fromsys):\n raise TypeError('fromsys is not a class')\n if not inspect.isclass(tosys):\n raise TypeError('tosys is not a class')\n\n path, distance = self.find_shortest_path(fromsys, tosys)\n\n if path is None:\n return None\n\n transforms = []\n currsys = fromsys\n for p in path[1:]: # first element is fromsys so we skip it\n transforms.append(self._graph[currsys][p])\n currsys = p\n\n fttuple = (fromsys, tosys)\n if fttuple not in self._composite_cache:\n comptrans = CompositeTransform(transforms, fromsys, tosys,\n register_graph=False)\n self._composite_cache[fttuple] = comptrans\n return self._composite_cache[fttuple]\n\n def lookup_name(self, name):\n \"\"\"\n Tries to locate the coordinate class with the provided alias.\n\n Parameters\n ----------\n name : str\n The alias to look up.\n\n Returns\n -------\n coordcls\n The coordinate class corresponding to the ``name`` or `None` if\n no such class exists.\n \"\"\"\n\n return self._cached_names.get(name, None)\n\n def get_names(self):\n \"\"\"\n Returns all available transform names. They will all be\n valid arguments to `lookup_name`.\n\n Returns\n -------\n nms : list\n The aliases for coordinate systems.\n \"\"\"\n return list(six.iterkeys(self._cached_names))\n\n def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,\n savelayout='plain', saveformat=None, color_edges=True):\n \"\"\"\n Converts this transform graph to the graphviz_ DOT format.\n\n Optionally saves it (requires `graphviz`_ be installed and on your path).\n\n .. _graphviz: http://www.graphviz.org/\n\n Parameters\n ----------\n priorities : bool\n If `True`, show the priority values for each transform. Otherwise,\n the will not be included in the graph.\n addnodes : sequence of str\n Additional coordinate systems to add (this can include systems\n already in the transform graph, but they will only appear once).\n savefn : `None` or str\n The file name to save this graph to or `None` to not save\n to a file.\n savelayout : str\n The graphviz program to use to layout the graph (see\n graphviz_ for details) or 'plain' to just save the DOT graph\n content. Ignored if ``savefn`` is `None`.\n saveformat : str\n The graphviz output format. (e.g. the ``-Txxx`` option for\n the command line program - see graphviz docs for details).\n Ignored if ``savefn`` is `None`.\n color_edges : bool\n Color the edges between two nodes (frames) based on the type of\n transform. 
``FunctionTransform``: red, ``StaticMatrixTransform``:\n blue, ``DynamicMatrixTransform``: green.\n\n Returns\n -------\n dotgraph : str\n A string with the DOT format graph.\n \"\"\"\n\n nodes = []\n # find the node names\n for a in self._graph:\n if a not in nodes:\n nodes.append(a)\n for b in self._graph[a]:\n if b not in nodes:\n nodes.append(b)\n for node in addnodes:\n if node not in nodes:\n nodes.append(node)\n nodenames = []\n invclsaliases = dict([(v, k) for k, v in six.iteritems(self._cached_names)])\n for n in nodes:\n if n in invclsaliases:\n nodenames.append('{0} [shape=oval label=\"{0}\\\\n`{1}`\"]'.format(n.__name__, invclsaliases[n]))\n else:\n nodenames.append(n.__name__ + '[ shape=oval ]')\n\n edgenames = []\n # Now the edges\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n transform = agraph[b]\n pri = transform.priority if hasattr(transform, 'priority') else 1\n color = trans_to_color[transform.__class__] if color_edges else 'black'\n edgenames.append((a.__name__, b.__name__, pri, color))\n\n # generate simple dot format graph\n lines = ['digraph AstropyCoordinateTransformGraph {']\n lines.append('; '.join(nodenames) + ';')\n for enm1, enm2, weights, color in edgenames:\n labelstr_fmt = '[ {0} {1} ]'\n\n if priorities:\n priority_part = 'label = \"{0}\"'.format(weights)\n else:\n priority_part = ''\n\n color_part = 'color = \"{0}\"'.format(color)\n\n labelstr = labelstr_fmt.format(priority_part, color_part)\n lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))\n\n lines.append('')\n lines.append('overlap=false')\n lines.append('}')\n dotgraph = '\\n'.join(lines)\n\n if savefn is not None:\n if savelayout == 'plain':\n with open(savefn, 'w') as f:\n f.write(dotgraph)\n else:\n args = [savelayout]\n if saveformat is not None:\n args.append('-T' + saveformat)\n proc = subprocess.Popen(args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate(dotgraph)\n if proc.returncode != 0:\n raise IOError('problem running graphviz: \\n' + stderr)\n\n with open(savefn, 'w') as f:\n f.write(stdout)\n\n return dotgraph\n\n def to_networkx_graph(self):\n \"\"\"\n Converts this transform graph into a networkx graph.\n\n .. note::\n You must have the `networkx <http://networkx.lanl.gov/>`_\n package installed for this to work.\n\n Returns\n -------\n nxgraph : `networkx.Graph <http://networkx.lanl.gov/reference/classes.graph.html>`_\n This `TransformGraph` as a `networkx.Graph`_.\n \"\"\"\n import networkx as nx\n\n nxgraph = nx.Graph()\n\n # first make the nodes\n for a in self._graph:\n if a not in nxgraph:\n nxgraph.add_node(a)\n for b in self._graph[a]:\n if b not in nxgraph:\n nxgraph.add_node(b)\n\n # Now the edges\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n transform = agraph[b]\n pri = transform.priority if hasattr(transform, 'priority') else 1\n color = trans_to_color[transform.__class__]\n nxgraph.add_edge(a, b, weight=pri, color=color)\n\n return nxgraph\n\n def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):\n \"\"\"\n A function decorator for defining transformations.\n\n .. 
note::\n If decorating a static method of a class, ``@staticmethod``\n should be added *above* this decorator.\n\n Parameters\n ----------\n transcls : class\n The class of the transformation object to create.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n Additional keyword arguments are passed into the ``transcls``\n constructor.\n\n Returns\n -------\n deco : function\n A function that can be called on another function as a decorator\n (see example).\n\n Notes\n -----\n This decorator assumes the first argument of the ``transcls``\n initializer accepts a callable, and that the second and third\n are ``fromsys`` and ``tosys``. If this is not true, you should just\n initialize the class manually and use `add_transform` instead of\n using this decorator.\n\n Examples\n --------\n\n ::\n\n graph = TransformGraph()\n\n class Frame1(BaseCoordinateFrame):\n ...\n\n class Frame2(BaseCoordinateFrame):\n ...\n\n @graph.transform(FunctionTransform, Frame1, Frame2)\n def f1_to_f2(f1_obj):\n ... do something with f1_obj ...\n return f2_obj\n\n\n \"\"\"\n def deco(func):\n # this doesn't do anything directly with the transform because\n # ``register_graph=self`` stores it in the transform graph\n # automatically\n transcls(func, fromsys, tosys, priority=priority,\n register_graph=self, **kwargs)\n return func\n return deco\n\n\n# <-------------------Define the builtin transform classes-------------------->\n\[email protected]_metaclass(ABCMeta)\nclass CoordinateTransform(object):\n \"\"\"\n An object that transforms a coordinate from one system to another.\n Subclasses must implement `__call__` with the provided signature.\n They should also call this superclass's ``__init__`` in their\n ``__init__``.\n\n Parameters\n ----------\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n \"\"\"\n\n def __init__(self, fromsys, tosys, priority=1, register_graph=None):\n if not inspect.isclass(fromsys):\n raise TypeError('fromsys must be a class')\n if not inspect.isclass(tosys):\n raise TypeError('tosys must be a class')\n\n self.fromsys = fromsys\n self.tosys = tosys\n self.priority = float(priority)\n\n if register_graph:\n # this will do the type-checking when it adds to the graph\n self.register(register_graph)\n else:\n if not inspect.isclass(fromsys) or not inspect.isclass(tosys):\n raise TypeError('fromsys and tosys must be classes')\n\n self.overlapping_frame_attr_names = overlap = []\n if (hasattr(fromsys, 'get_frame_attr_names') and\n hasattr(tosys, 'get_frame_attr_names')):\n # the if statement is there so that non-frame things might be usable\n # if it makes sense\n for from_nm in fromsys.get_frame_attr_names():\n if from_nm in tosys.get_frame_attr_names():\n overlap.append(from_nm)\n\n def register(self, graph):\n \"\"\"\n Add this transformation to the requested Transformation graph,\n replacing anything already connecting these two coordinates.\n\n Parameters\n ----------\n graph : a TransformGraph object\n The 
graph to register this transformation with.\n \"\"\"\n graph.add_transform(self.fromsys, self.tosys, self)\n\n def unregister(self, graph):\n \"\"\"\n Remove this transformation from the requested transformation\n graph.\n\n Parameters\n ----------\n graph : a TransformGraph object\n The graph to unregister this transformation from.\n\n Raises\n ------\n ValueError\n If this is not currently in the transform graph.\n \"\"\"\n graph.remove_transform(self.fromsys, self.tosys, self)\n\n @abstractmethod\n def __call__(self, fromcoord, toframe):\n \"\"\"\n Does the actual coordinate transformation from the ``fromsys`` class to\n the ``tosys`` class.\n\n Parameters\n ----------\n fromcoord : fromsys object\n An object of class matching ``fromsys`` that is to be transformed.\n toframe : object\n An object that has the attributes necessary to fully specify the\n frame. That is, it must have attributes with names that match the\n keys of the dictionary that ``tosys.get_frame_attr_names()``\n returns. Typically this is of class ``tosys``, but it *might* be\n some other class as long as it has the appropriate attributes.\n\n Returns\n -------\n tocoord : tosys object\n The new coordinate after the transform has been applied.\n \"\"\"\n\n\nclass FunctionTransform(CoordinateTransform):\n \"\"\"\n A coordinate transformation defined by a function that accepts a\n coordinate object and returns the transformed coordinate object.\n\n Parameters\n ----------\n func : callable\n The transformation function. Should have a call signature\n ``func(formcoord, toframe)``. Note that, unlike\n `CoordinateTransform.__call__`, ``toframe`` is assumed to be of type\n ``tosys`` for this function.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n\n Raises\n ------\n TypeError\n If ``func`` is not callable.\n ValueError\n If ``func`` cannot accept two arguments.\n\n\n \"\"\"\n\n def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):\n if not six.callable(func):\n raise TypeError('func must be callable')\n\n with suppress(TypeError):\n sig = signature(func)\n kinds = [x.kind for x in sig.parameters.values()]\n if (len(x for x in kinds if x == sig.POSITIONAL_ONLY) != 2\n and sig.VAR_POSITIONAL not in kinds):\n raise ValueError('provided function does not accept two arguments')\n\n self.func = func\n\n super(FunctionTransform, self).__init__(fromsys, tosys,\n priority=priority, register_graph=register_graph)\n\n def __call__(self, fromcoord, toframe):\n res = self.func(fromcoord, toframe)\n if not isinstance(res, self.tosys):\n raise TypeError('the transformation function yielded {0} but '\n 'should have been of type {1}'.format(res, self.tosys))\n if fromcoord.data.differentials and not res.data.differentials:\n warn(\"Applied a FunctionTransform to a coordinate frame with \"\n \"differentials, but the FunctionTransform does not handle \"\n \"differentials, so they have been dropped.\", AstropyWarning)\n return res\n\n\nclass FunctionTransformWithFiniteDifference(FunctionTransform):\n r\"\"\"\n A coordinate transformation that works like a `FunctionTransform`, but\n computes velocity shifts based on the finite-difference relative to one of\n 
the frame attributes. Note that the transform function should *not* change\n the differential at all in this case, as any differentials will be\n overridden.\n\n When a differential is in the from coordinate, the finite difference\n calculation has two components. The first part is simple the existing\n differential, but re-orientation (using finite-difference techniques) to\n point in the direction the velocity vector has in the *new* frame. The\n second component is the \"induced\" velocity. That is, the velocity\n intrinsic to the frame itself, estimated by shifting the frame using the\n ``finite_difference_frameattr_name`` frame attribute a small amount\n (``finite_difference_dt``) in time and re-calculating the position.\n\n Parameters\n ----------\n finite_difference_frameattr_name : str or None\n The name of the frame attribute on the frames to use for the finite\n difference. Both the to and the from frame will be checked for this\n attribute, but only one needs to have it. If None, no velocity\n component induced from the frame itself will be included - only the\n re-orientation of any exsiting differential.\n finite_difference_dt : `~astropy.units.Quantity` or callable\n If a quantity, this is the size of the differential used to do the\n finite difference. If a callable, should accept\n ``(fromcoord, toframe)`` and return the ``dt`` value.\n symmetric_finite_difference : bool\n If True, the finite difference is computed as\n :math:`\\frac{x(t + \\Delta t / 2) - x(t + \\Delta t / 2)}{\\Delta t}`, or\n if False, :math:`\\frac{x(t + \\Delta t) - x(t)}{\\Delta t}`. The latter\n case has slightly better performance (and more stable finite difference\n behavior).\n\n All other parameters are identical to the initializer for\n `FunctionTransform`.\n\n \"\"\"\n\n def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,\n finite_difference_frameattr_name='obstime',\n finite_difference_dt=1*u.second,\n symmetric_finite_difference=True):\n super(FunctionTransformWithFiniteDifference, self).__init__(func,\n fromsys, tosys, priority, register_graph)\n self.finite_difference_frameattr_name = finite_difference_frameattr_name\n self.finite_difference_dt = finite_difference_dt\n self.symmetric_finite_difference = symmetric_finite_difference\n\n @property\n def finite_difference_frameattr_name(self):\n return self._finite_difference_frameattr_name\n\n @finite_difference_frameattr_name.setter\n def finite_difference_frameattr_name(self, value):\n if value is None:\n self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False\n else:\n diff_attr_in_fromsys = value in self.fromsys.frame_attributes\n diff_attr_in_tosys = value in self.tosys.frame_attributes\n if diff_attr_in_fromsys or diff_attr_in_tosys:\n self._diff_attr_in_fromsys = diff_attr_in_fromsys\n self._diff_attr_in_tosys = diff_attr_in_tosys\n else:\n raise ValueError('Frame attribute name {} is not a frame '\n 'attribute of {} or {}'.format(value,\n self.fromsys,\n self.tosys))\n self._finite_difference_frameattr_name = value\n\n def __call__(self, fromcoord, toframe):\n from .representation import (CartesianRepresentation,\n CartesianDifferential)\n\n supcall = self.func\n if fromcoord.data.differentials:\n # this is the finite difference case\n\n if callable(self.finite_difference_dt):\n dt = self.finite_difference_dt(fromcoord, toframe)\n else:\n dt = self.finite_difference_dt\n halfdt = dt/2\n\n from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())\n reprwithoutdiff = 
supcall(from_diffless, toframe)\n\n # first we use the existing differential to compute an offset due to\n # the already-existing velocity, but in the new frame\n fromcoord_cart = fromcoord.cartesian\n if self.symmetric_finite_difference:\n fwdxyz = (fromcoord_cart.xyz +\n fromcoord_cart.differentials['s'].d_xyz*halfdt)\n fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)\n backxyz = (fromcoord_cart.xyz -\n fromcoord_cart.differentials['s'].d_xyz*halfdt)\n back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)\n else:\n fwdxyz = (fromcoord_cart.xyz +\n fromcoord_cart.differentials['s'].d_xyz*dt)\n fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)\n back = reprwithoutdiff\n diffxyz = (fwd.cartesian - back.cartesian).xyz / dt\n\n # now we compute the \"induced\" velocities due to any movement in\n # the frame itself over time\n attrname = self.finite_difference_frameattr_name\n if attrname is not None:\n if self.symmetric_finite_difference:\n if self._diff_attr_in_fromsys:\n kws = {attrname: getattr(from_diffless, attrname) + halfdt}\n from_diffless_fwd = from_diffless.replicate(**kws)\n else:\n from_diffless_fwd = from_diffless\n if self._diff_attr_in_tosys:\n kws = {attrname: getattr(toframe, attrname) + halfdt}\n fwd_frame = toframe.replicate_without_data(**kws)\n else:\n fwd_frame = toframe\n fwd = supcall(from_diffless_fwd, fwd_frame)\n\n if self._diff_attr_in_fromsys:\n kws = {attrname: getattr(from_diffless, attrname) - halfdt}\n from_diffless_back = from_diffless.replicate(**kws)\n else:\n from_diffless_back = from_diffless\n if self._diff_attr_in_tosys:\n kws = {attrname: getattr(toframe, attrname) - halfdt}\n back_frame = toframe.replicate_without_data(**kws)\n else:\n back_frame = toframe\n back = supcall(from_diffless_back, back_frame)\n else:\n if self._diff_attr_in_fromsys:\n kws = {attrname: getattr(from_diffless, attrname) + dt}\n from_diffless_fwd = from_diffless.replicate(**kws)\n else:\n from_diffless_fwd = from_diffless\n if self._diff_attr_in_tosys:\n kws = {attrname: getattr(toframe, attrname) + dt}\n fwd_frame = toframe.replicate_without_data(**kws)\n else:\n fwd_frame = toframe\n fwd = supcall(from_diffless_fwd, fwd_frame)\n back = reprwithoutdiff\n\n diffxyz += (fwd.cartesian - back.cartesian).xyz / dt\n\n newdiff = CartesianDifferential(diffxyz)\n reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)\n return reprwithoutdiff.realize_frame(reprwithdiff)\n else:\n return supcall(fromcoord, toframe)\n\n\nclass BaseAffineTransform(CoordinateTransform):\n \"\"\"Base class for common functionality between the ``AffineTransform``-type\n subclasses.\n\n This base class is needed because ``AffineTransform`` and the matrix\n transform classes share the ``_apply_transform()`` method, but have\n different ``__call__()`` methods. ``StaticMatrixTransform`` passes in a\n matrix stored as a class attribute, and both of the matrix transforms pass\n in ``None`` for the offset. 
Hence, user subclasses would likely want to\n subclass this (rather than ``AffineTransform``) if they want to provide\n alternative transformations using this machinery.\n \"\"\"\n\n def _apply_transform(self, fromcoord, matrix, offset):\n from .representation import (UnitSphericalRepresentation,\n CartesianDifferential,\n SphericalDifferential,\n SphericalCosLatDifferential,\n RadialDifferential)\n\n data = fromcoord.data\n has_velocity = 's' in data.differentials\n\n # list of unit differentials\n _unit_diffs = (SphericalDifferential._unit_differential,\n SphericalCosLatDifferential._unit_differential)\n unit_vel_diff = (has_velocity and\n isinstance(data.differentials['s'], _unit_diffs))\n rad_vel_diff = (has_velocity and\n isinstance(data.differentials['s'], RadialDifferential))\n\n # Some initial checking to short-circuit doing any re-representation if\n # we're going to fail anyways:\n if isinstance(data, UnitSphericalRepresentation) and offset is not None:\n raise TypeError(\"Position information stored on coordinate frame \"\n \"is insufficient to do a full-space position \"\n \"transformation (representation class: {0})\"\n .format(data.__class__))\n\n elif (has_velocity and (unit_vel_diff or rad_vel_diff) and\n offset is not None and 's' in offset.differentials):\n # Coordinate has a velocity, but it is not a full-space velocity\n # that we need to do a velocity offset\n raise TypeError(\"Velocity information stored on coordinate frame \"\n \"is insufficient to do a full-space velocity \"\n \"transformation (differential class: {0})\"\n .format(data.differentials['s'].__class__))\n\n elif len(data.differentials) > 1:\n # We should never get here because the frame initializer shouldn't\n # allow more differentials, but this just adds protection for\n # subclasses that somehow skip the checks\n raise ValueError(\"Representation passed to AffineTransform contains\"\n \" multiple associated differentials. Only a single\"\n \" differential with velocity units is presently\"\n \" supported (differentials: {0}).\"\n .format(str(data.differentials)))\n\n # If the representation is a UnitSphericalRepresentation, and this is\n # just a MatrixTransform, we have to try to turn the differential into a\n # Unit version of the differential (if no radial velocity) or a\n # sphericaldifferential with zero proper motion (if only a radial\n # velocity) so that the matrix operation works\n if (has_velocity and isinstance(data, UnitSphericalRepresentation) and\n not unit_vel_diff and not rad_vel_diff):\n # retrieve just velocity differential\n unit_diff = data.differentials['s'].represent_as(\n data.differentials['s']._unit_differential, data)\n data = data.with_differentials({'s': unit_diff}) # updates key\n\n # If it's a RadialDifferential, we flat-out ignore the differentials\n # This is because, by this point (past the validation above), we can\n # only possibly be doing a rotation-only transformation, and that\n # won't change the radial differential. We later add it back in\n elif rad_vel_diff:\n data = data.without_differentials()\n\n # Convert the representation and differentials to cartesian without\n # having them attached to a frame\n rep = data.to_cartesian()\n diffs = dict([(k, diff.represent_as(CartesianDifferential, data))\n for k, diff in data.differentials.items()])\n rep = rep.with_differentials(diffs)\n\n # Only do transform if matrix is specified. 
This is for speed in\n # transformations that only specify an offset (e.g., LSR)\n if matrix is not None:\n # Note: this applies to both representation and differentials\n rep = rep.transform(matrix)\n\n # TODO: if we decide to allow arithmetic between representations that\n # contain differentials, this can be tidied up\n if offset is not None:\n newrep = (rep.without_differentials() +\n offset.without_differentials())\n else:\n newrep = rep.without_differentials()\n\n # We need a velocity (time derivative) and, for now, are strict: the\n # representation can only contain a velocity differential and no others.\n if has_velocity and not rad_vel_diff:\n veldiff = rep.differentials['s'] # already in Cartesian form\n\n if offset is not None and 's' in offset.differentials:\n veldiff = veldiff + offset.differentials['s']\n\n newrep = newrep.with_differentials({'s': veldiff})\n\n if isinstance(fromcoord.data, UnitSphericalRepresentation):\n # Special-case this because otherwise the return object will think\n # it has a valid distance with the default return (a\n # CartesianRepresentation instance)\n\n if has_velocity and not unit_vel_diff and not rad_vel_diff:\n # We have to first represent as the Unit types we converted to,\n # then put the d_distance information back in to the\n # differentials and re-represent as their original forms\n newdiff = newrep.differentials['s']\n _unit_cls = fromcoord.data.differentials['s']._unit_differential\n newdiff = newdiff.represent_as(_unit_cls, newrep)\n\n kwargs = dict([(comp, getattr(newdiff, comp))\n for comp in newdiff.components])\n kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance\n diffs = {'s': fromcoord.data.differentials['s'].__class__(\n copy=False, **kwargs)}\n\n elif has_velocity and unit_vel_diff:\n newdiff = newrep.differentials['s'].represent_as(\n fromcoord.data.differentials['s'].__class__, newrep)\n diffs = {'s': newdiff}\n\n else:\n diffs = newrep.differentials\n\n newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs\n newrep = newrep.with_differentials(diffs)\n\n elif has_velocity and unit_vel_diff:\n # Here, we're in the case where the representation is not\n # UnitSpherical, but the differential *is* one of the UnitSpherical\n # types. We have to convert back to that differential class or the\n # resulting frame will think it has a valid radial_velocity. This\n # can probably be cleaned up: we currently have to go through the\n # dimensional version of the differential before representing as the\n # unit differential so that the units work out (the distance length\n # unit shouldn't appear in the resulting proper motions)\n\n diff_cls = fromcoord.data.differentials['s'].__class__\n newrep = newrep.represent_as(fromcoord.data.__class__,\n diff_cls._dimensional_differential)\n newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)\n\n # We pulled the radial differential off of the representation\n # earlier, so now we need to put it back. 
But, in order to do that, we\n # have to turn the representation into a repr that is compatible with\n # having a RadialDifferential\n if has_velocity and rad_vel_diff:\n newrep = newrep.represent_as(fromcoord.data.__class__)\n newrep = newrep.with_differentials(\n {'s': fromcoord.data.differentials['s']})\n\n return newrep\n\n\nclass AffineTransform(BaseAffineTransform):\n \"\"\"\n A coordinate transformation specified as a function that yields a 3 x 3\n cartesian transformation matrix and a tuple of displacement vectors.\n\n See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for\n an example.\n\n Parameters\n ----------\n transform_func : callable\n A callable that has the signature ``transform_func(fromcoord, toframe)``\n and returns: a (3, 3) matrix that operates on ``fromcoord`` in a\n Cartesian representation, and a ``CartesianRepresentation`` with\n (optionally) an attached velocity ``CartesianDifferential`` to represent\n a translation and offset in velocity to apply after the matrix\n operation.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n\n Raises\n ------\n TypeError\n If ``transform_func`` is not callable\n\n \"\"\"\n\n def __init__(self, transform_func, fromsys, tosys, priority=1,\n register_graph=None):\n\n if not six.callable(transform_func):\n raise TypeError('transform_func is not callable')\n self.transform_func = transform_func\n\n super(AffineTransform, self).__init__(fromsys, tosys, priority=priority,\n register_graph=register_graph)\n\n def __call__(self, fromcoord, toframe):\n\n M, vec = self.transform_func(fromcoord, toframe)\n newrep = self._apply_transform(fromcoord, M, vec)\n\n return toframe.realize_frame(newrep)\n\n\nclass StaticMatrixTransform(BaseAffineTransform):\n \"\"\"\n A coordinate transformation defined as a 3 x 3 cartesian\n transformation matrix.\n\n This is distinct from DynamicMatrixTransform in that this kind of matrix is\n independent of frame attributes. That is, it depends *only* on the class of\n the frame.\n\n Parameters\n ----------\n matrix : array-like or callable\n A 3 x 3 matrix for transforming 3-vectors. In most cases will\n be unitary (although this is not strictly required). 
If a callable,\n will be called *with no arguments* to get the matrix.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n\n Raises\n ------\n ValueError\n If the matrix is not 3 x 3\n\n \"\"\"\n\n def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):\n if six.callable(matrix):\n matrix = matrix()\n self.matrix = np.array(matrix)\n\n if self.matrix.shape != (3, 3):\n raise ValueError('Provided matrix is not 3 x 3')\n\n super(StaticMatrixTransform, self).__init__(fromsys, tosys,\n priority=priority,\n register_graph=register_graph)\n\n def __call__(self, fromcoord, toframe):\n newrep = self._apply_transform(fromcoord, self.matrix, None)\n return toframe.realize_frame(newrep)\n\n\nclass DynamicMatrixTransform(BaseAffineTransform):\n \"\"\"\n A coordinate transformation specified as a function that yields a\n 3 x 3 cartesian transformation matrix.\n\n This is similar to, but distinct from StaticMatrixTransform, in that the\n matrix for this class might depend on frame attributes.\n\n Parameters\n ----------\n matrix_func : callable\n A callable that has the signature ``matrix_func(fromcoord, toframe)`` and\n returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian\n representation to the new coordinate system.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n\n Raises\n ------\n TypeError\n If ``matrix_func`` is not callable\n\n \"\"\"\n\n def __init__(self, matrix_func, fromsys, tosys, priority=1,\n register_graph=None):\n if not six.callable(matrix_func):\n raise TypeError('matrix_func is not callable')\n self.matrix_func = matrix_func\n\n def _transform_func(fromcoord, toframe):\n return self.matrix_func(fromcoord, toframe), None\n\n super(DynamicMatrixTransform, self).__init__(fromsys, tosys,\n priority=priority,\n register_graph=register_graph)\n\n def __call__(self, fromcoord, toframe):\n M = self.matrix_func(fromcoord, toframe)\n newrep = self._apply_transform(fromcoord, M, None)\n return toframe.realize_frame(newrep)\n\n\nclass CompositeTransform(CoordinateTransform):\n \"\"\"\n A transformation constructed by combining together a series of single-step\n transformations.\n\n Note that the intermediate frame objects are constructed using any frame\n attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate\n frame (``toframe`` favored over ``fromframe`` if there's a conflict). 
Any frame\n attributes that are not present use the defaults.\n\n Parameters\n ----------\n transforms : sequence of `CoordinateTransform` objects\n The sequence of transformations to apply.\n fromsys : class\n The coordinate frame class to start from.\n tosys : class\n The coordinate frame class to transform into.\n priority : number\n The priority if this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register_graph : `TransformGraph` or `None`\n A graph to register this transformation with on creation, or\n `None` to leave it unregistered.\n collapse_static_mats : bool\n If `True`, consecutive `StaticMatrixTransform` will be collapsed into a\n single transformation to speed up the calculation.\n\n \"\"\"\n\n def __init__(self, transforms, fromsys, tosys, priority=1,\n register_graph=None, collapse_static_mats=True):\n super(CompositeTransform, self).__init__(fromsys, tosys,\n priority=priority,\n register_graph=register_graph)\n\n if collapse_static_mats:\n transforms = self._combine_statics(transforms)\n\n self.transforms = tuple(transforms)\n\n def _combine_statics(self, transforms):\n \"\"\"\n Combines together sequences of `StaticMatrixTransform`s into a single\n transform and returns it.\n \"\"\"\n newtrans = []\n for currtrans in transforms:\n lasttrans = newtrans[-1] if len(newtrans) > 0 else None\n\n if (isinstance(lasttrans, StaticMatrixTransform) and\n isinstance(currtrans, StaticMatrixTransform)):\n combinedmat = np.dot(lasttrans.matrix, currtrans.matrix)\n newtrans[-1] = StaticMatrixTransform(combinedmat,\n lasttrans.fromsys,\n currtrans.tosys)\n else:\n newtrans.append(currtrans)\n return newtrans\n\n def __call__(self, fromcoord, toframe):\n curr_coord = fromcoord\n for t in self.transforms:\n # build an intermediate frame with attributes taken from either\n # `fromframe`, or if not there, `toframe`, or if not there, use\n # the defaults\n # TODO: caching this information when creating the transform may\n # speed things up a lot\n frattrs = {}\n for inter_frame_attr_nm in t.tosys.get_frame_attr_names():\n if hasattr(toframe, inter_frame_attr_nm):\n attr = getattr(toframe, inter_frame_attr_nm)\n frattrs[inter_frame_attr_nm] = attr\n elif hasattr(fromcoord, inter_frame_attr_nm):\n attr = getattr(fromcoord, inter_frame_attr_nm)\n frattrs[inter_frame_attr_nm] = attr\n\n curr_toframe = t.tosys(**frattrs)\n curr_coord = t(curr_coord, curr_toframe)\n\n # this is safe even in the case where self.transforms is empty, because\n # coordinate objects are immutible, so copying is not needed\n return curr_coord\n\n\n# map class names to colorblind-safe colors\ntrans_to_color = OrderedDict()\ntrans_to_color[AffineTransform] = '#555555' # gray\ntrans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown\ntrans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish\ntrans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish\ntrans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, unicode_literals, division,\n print_function)\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ..core import Model, custom_model\nfrom ..parameters import Parameter\nfrom .. import models\n\nfrom ...utils.compat.funcsigs import signature\n\nclass NonFittableModel(Model):\n \"\"\"An example class directly subclassing Model for testing.\"\"\"\n\n a = Parameter()\n\n def __init__(self, a, model_set_axis=None):\n super(NonFittableModel, self).__init__(\n a, model_set_axis=model_set_axis)\n\n @staticmethod\n def evaluate():\n pass\n\n\ndef test_Model_instance_repr_and_str():\n m = NonFittableModel(42.5)\n assert repr(m) == \"<NonFittableModel(a=42.5)>\"\n assert (str(m) ==\n \"Model: NonFittableModel\\n\"\n \"Inputs: ()\\n\"\n \"Outputs: ()\\n\"\n \"Model set size: 1\\n\"\n \"Parameters:\\n\"\n \" a \\n\"\n \" ----\\n\"\n \" 42.5\")\n\n assert len(m) == 1\n\n\ndef test_Model_array_parameter():\n model = models.Gaussian1D(4, 2, 1)\n assert_allclose(model.param_sets, [[4], [2], [1]])\n\n\ndef test_inputless_model():\n \"\"\"\n Regression test for\n https://github.com/astropy/astropy/pull/3772#issuecomment-101821641\n \"\"\"\n\n class TestModel(Model):\n inputs = ()\n outputs = ('y',)\n a = Parameter()\n\n @staticmethod\n def evaluate(a):\n return a\n\n m = TestModel(1)\n assert m.a == 1\n assert m() == 1\n\n # Test array-like output\n m = TestModel([1, 2, 3], model_set_axis=False)\n assert len(m) == 1\n assert np.all(m() == [1, 2, 3])\n\n # Test a model set\n m = TestModel(a=[1, 2, 3], model_set_axis=0)\n assert len(m) == 3\n assert np.all(m() == [1, 2, 3])\n\n # Test a model set\n m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)\n assert len(m) == 2\n assert np.all(m() == [[1, 2, 3], [4, 5, 6]])\n\n\ndef test_ParametericModel():\n with pytest.raises(TypeError):\n models.Gaussian1D(1, 2, 3, wrong=4)\n\n\ndef test_custom_model_signature():\n \"\"\"\n Tests that the signatures for the __init__ and __call__\n methods of custom models are useful.\n \"\"\"\n\n @custom_model\n def model_a(x):\n return x\n\n assert model_a.param_names == ()\n assert model_a.n_inputs == 1\n sig = signature(model_a.__init__)\n assert list(sig.parameters.keys()) == ['self', 'args', 'kwargs']\n sig = signature(model_a.__call__)\n assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',\n 'with_bounding_box', 'fill_value',\n 'equivalencies']\n\n @custom_model\n def model_b(x, a=1, b=2):\n return x + a + b\n\n assert model_b.param_names == ('a', 'b')\n assert model_b.n_inputs == 1\n sig = signature(model_b.__init__)\n assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']\n assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]\n sig = signature(model_b.__call__)\n assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',\n 'with_bounding_box', 'fill_value',\n 'equivalencies']\n\n @custom_model\n def model_c(x, y, a=1, b=2):\n return x + y + a + b\n\n assert model_c.param_names == ('a', 'b')\n assert model_c.n_inputs == 2\n sig = signature(model_c.__init__)\n assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']\n assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]\n sig = signature(model_c.__call__)\n assert list(sig.parameters.keys()) == ['self', 'x', 'y', 'model_set_axis',\n 'with_bounding_box', 'fill_value',\n 'equivalencies']\n\n\ndef 
test_custom_model_subclass():\n \"\"\"Test that custom models can be subclassed.\"\"\"\n\n @custom_model\n def model_a(x, a=1):\n return x * a\n\n class model_b(model_a):\n # Override the evaluate from model_a\n @classmethod\n def evaluate(cls, x, a):\n return -super(model_b, cls).evaluate(x, a)\n\n b = model_b()\n assert b.param_names == ('a',)\n assert b.a == 1\n assert b(1) == -1\n\n sig = signature(model_b.__init__)\n assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']\n sig = signature(model_b.__call__)\n assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',\n 'with_bounding_box', 'fill_value',\n 'equivalencies']\n\n\ndef test_custom_model_parametrized_decorator():\n \"\"\"Tests using custom_model as a decorator with parameters.\"\"\"\n\n def cosine(x, amplitude=1):\n return [amplitude * np.cos(x)]\n\n @custom_model(fit_deriv=cosine)\n def sine(x, amplitude=1):\n return amplitude * np.sin(x)\n\n assert issubclass(sine, Model)\n s = sine(2)\n assert_allclose(s(np.pi / 2), 2)\n assert_allclose(s.fit_deriv(0, 2), 2)\n\n\ndef test_custom_inverse():\n \"\"\"Test setting a custom inverse on a model.\"\"\"\n\n p = models.Polynomial1D(1, c0=-2, c1=3)\n # A trivial inverse for a trivial polynomial\n inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))\n\n with pytest.raises(NotImplementedError):\n p.inverse\n\n p.inverse = inv\n\n x = np.arange(100)\n\n assert_allclose(x, p(p.inverse(x)))\n assert_allclose(x, p.inverse(p(x)))\n\n p.inverse = None\n\n with pytest.raises(NotImplementedError):\n p.inverse\n\n\ndef test_custom_inverse_reset():\n \"\"\"Test resetting a custom inverse to the model's default inverse.\"\"\"\n\n class TestModel(Model):\n inputs = ()\n outputs = ('y',)\n\n @property\n def inverse(self):\n return models.Shift()\n\n @staticmethod\n def evaluate():\n return 0\n\n # The above test model has no meaning, nor does its inverse--this just\n # tests that setting an inverse and resetting to the default inverse works\n\n m = TestModel()\n assert isinstance(m.inverse, models.Shift)\n\n m.inverse = models.Scale()\n assert isinstance(m.inverse, models.Scale)\n\n del m.inverse\n assert isinstance(m.inverse, models.Shift)\n\n\ndef test_render_model_2d():\n imshape = (71, 141)\n image = np.zeros(imshape)\n coords = y, x = np.indices(imshape)\n\n model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)\n\n # test points for edges\n ye, xe = [0, 35, 70], [0, 70, 140]\n # test points for floating point positions\n yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]\n\n test_pts = [(a, b) for a in xe for b in ye]\n test_pts += [(a, b) for a in xf for b in yf]\n\n for x0, y0 in test_pts:\n model.x_mean = x0\n model.y_mean = y0\n expected = model(x, y)\n for xy in [coords, None]:\n for im in [image.copy(), None]:\n if (im is None) & (xy is None):\n # this case is tested in Fittable2DModelTester\n continue\n actual = model.render(out=im, coords=xy)\n if im is None:\n assert_allclose(actual, model.render(coords=xy))\n # assert images match\n assert_allclose(expected, actual, atol=3e-7)\n # assert model fully captured\n if (x0, y0) == (70, 35):\n boxed = model.render()\n flux = np.sum(expected)\n assert ((flux - np.sum(boxed)) / flux) < 1e-7\n # test an error is raised when the bounding box is larger than the input array\n try:\n actual = model.render(out=np.zeros((1, 1)))\n except ValueError:\n pass\n\n\ndef test_render_model_1d():\n npix = 101\n image = np.zeros(npix)\n coords = np.arange(npix)\n\n model = models.Gaussian1D()\n\n # test points\n test_pts = 
[0, 49.1, 49.5, 49.9, 100]\n\n # test widths\n test_stdv = np.arange(5.5, 6.7, .2)\n\n for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:\n model.mean = x0\n model.stddev = stdv\n expected = model(coords)\n for x in [coords, None]:\n for im in [image.copy(), None]:\n if (im is None) & (x is None):\n # this case is tested in Fittable1DModelTester\n continue\n actual = model.render(out=im, coords=x)\n # assert images match\n assert_allclose(expected, actual, atol=3e-7)\n # assert model fully captured\n if (x0, stdv) == (49.5, 5.5):\n boxed = model.render()\n flux = np.sum(expected)\n assert ((flux - np.sum(boxed)) / flux) < 1e-7\n\n\ndef test_render_model_3d():\n imshape = (17, 21, 27)\n image = np.zeros(imshape)\n coords = np.indices(imshape)\n\n def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):\n rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2\n val = (rsq < 1) * amp\n return val\n\n class Ellipsoid3D(custom_model(ellipsoid)):\n @property\n def bounding_box(self):\n return ((self.z0 - self.c, self.z0 + self.c),\n (self.y0 - self.b, self.y0 + self.b),\n (self.x0 - self.a, self.x0 + self.a))\n\n model = Ellipsoid3D()\n\n # test points for edges\n ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]\n # test points for floating point positions\n zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]\n\n test_pts = [(x, y, z) for x in xe for y in ye for z in ze]\n test_pts += [(x, y, z) for x in xf for y in yf for z in zf]\n\n for x0, y0, z0 in test_pts:\n model.x0 = x0\n model.y0 = y0\n model.z0 = z0\n expected = model(*coords[::-1])\n for c in [coords, None]:\n for im in [image.copy(), None]:\n if (im is None) & (c is None):\n continue\n actual = model.render(out=im, coords=c)\n boxed = model.render()\n # assert images match\n assert_allclose(expected, actual)\n # assert model fully captured\n if (z0, y0, x0) == (8, 10, 13):\n boxed = model.render()\n assert (np.sum(expected) - np.sum(boxed)) == 0\n\n\ndef test_custom_bounding_box_1d():\n \"\"\"\n Tests that the bounding_box setter works.\n \"\"\"\n # 1D models\n g1 = models.Gaussian1D()\n bb = g1.bounding_box\n expected = g1.render()\n\n # assign the same bounding_box, now through the bounding_box setter\n g1.bounding_box = bb\n assert_allclose(g1.render(), expected)\n\n # 2D models\n g2 = models.Gaussian2D()\n bb = g2.bounding_box\n expected = g2.render()\n\n # assign the same bounding_box, now through the bounding_box setter\n g2.bounding_box = bb\n assert_allclose(g2.render(), expected)\n\n\ndef test_n_submodels_in_single_models():\n assert models.Gaussian1D.n_submodels() == 1\n assert models.Gaussian2D.n_submodels() == 1\n"
] |
[
[
"numpy.arange",
"numpy.prod"
],
[
"numpy.rollaxis",
"numpy.logical_not",
"numpy.asarray",
"numpy.ndim",
"numpy.logical_or",
"numpy.size",
"numpy.asanyarray",
"numpy.shape",
"numpy.any",
"numpy.mean",
"numpy.ceil",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.all",
"numpy.array"
],
[
"numpy.random.random",
"numpy.isnan",
"numpy.all",
"numpy.iinfo",
"numpy.array"
],
[
"numpy.dot",
"numpy.array"
],
[
"numpy.arange",
"numpy.indices",
"numpy.cos",
"numpy.sin",
"numpy.testing.assert_allclose",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wright/dymos
|
[
"9d253a16ffcc162a84ef1b4a7dddcebeda5522ac",
"9d253a16ffcc162a84ef1b4a7dddcebeda5522ac",
"9d253a16ffcc162a84ef1b4a7dddcebeda5522ac"
] |
[
"dymos/transcriptions/runge_kutta/components/runge_kutta_k_comp.py",
"dymos/transcriptions/pseudospectral/components/test/test_gauss_lobatto_interleave_comp.py",
"dymos/transcriptions/common/continuity_comp.py"
] |
[
"import numpy as np\n\nimport openmdao.api as om\nfrom ....utils.rk_methods import rk_methods\nfrom ....utils.misc import get_rate_units\nfrom ....options import options as dymos_options\n\n\nclass RungeKuttaKComp(om.ExplicitComponent):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._no_check_partials = not dymos_options['include_check_partials']\n\n def initialize(self):\n self.options.declare('num_segments', types=int,\n desc='The number of segments (timesteps) in the phase')\n\n self.options.declare('method', default='RK4', types=str,\n desc='Specific Runge-Kutta Method to use.')\n\n self.options.declare('state_options', types=dict,\n desc='Dictionary of state names/options for the phase')\n\n self.options.declare('time_units', default=None, allow_none=True, types=str,\n desc='Units of the integration variable')\n\n def configure_io(self):\n \"\"\"\n I/O creation is delayed until configure so that we can determine the shape and units for\n the states.\n \"\"\"\n self._var_names = {}\n\n num_seg = self.options['num_segments']\n rk_data = rk_methods[self.options['method']]\n num_stages = rk_data['num_stages']\n\n self.add_input('h', val=np.ones(num_seg), units=self.options['time_units'],\n desc='step size for current Runge-Kutta segment.')\n\n for name, options in self.options['state_options'].items():\n shape = options['shape']\n units = options['units']\n rate_units = get_rate_units(units, self.options['time_units'])\n\n self._var_names[name] = {}\n self._var_names[name]['f'] = 'f:{0}'.format(name)\n self._var_names[name]['k'] = 'k:{0}'.format(name)\n\n self.add_input(self._var_names[name]['f'], shape=(num_seg, num_stages) + shape,\n units=rate_units,\n desc='The predicted values of the state at the ODE evaluation points.')\n\n self.add_output(self._var_names[name]['k'], shape=(num_seg, num_stages) + shape,\n units=units, desc='RK multiplier k for each stage in the segment.')\n\n size = np.prod(shape)\n ar = np.arange(size * num_stages * num_seg, dtype=int)\n self.declare_partials(of=self._var_names[name]['k'],\n wrt=self._var_names[name]['f'],\n rows=ar, cols=ar)\n\n r = np.arange(size * num_stages * num_seg, dtype=int)\n c = np.repeat(np.arange(num_seg, dtype=int), num_stages * size)\n self.declare_partials(of=self._var_names[name]['k'],\n wrt='h',\n rows=r, cols=c)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n h = inputs['h']\n for name, options in self.options['state_options'].items():\n f = inputs[self._var_names[name]['f']]\n outputs[self._var_names[name]['k']] = f * h[:, np.newaxis, np.newaxis]\n\n def compute_partials(self, inputs, partials):\n num_stages = rk_methods[self.options['method']]['num_stages']\n h = inputs['h']\n for name, options in self.options['state_options'].items():\n size = np.prod(options['shape'])\n k_name = self._var_names[name]['k']\n f_name = self._var_names[name]['f']\n partials[k_name, f_name] = np.repeat(h, num_stages * size)\n partials[k_name, 'h'] = inputs[self._var_names[name]['f']].ravel()\n",
"import unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom dymos.utils.testing_utils import assert_check_partials\n\nimport dymos as dm\nfrom dymos.transcriptions.pseudospectral.components import GaussLobattoInterleaveComp\nfrom dymos.transcriptions.grid_data import GridData\n\n\nclass TestGaussLobattoInterleaveComp(unittest.TestCase):\n\n def setUp(self):\n dm.options['include_check_partials'] = True\n\n self.grid_data = gd = GridData(num_segments=3, segment_ends=np.array([0., 2., 4., 10.0]),\n transcription='gauss-lobatto', transcription_order=[3, 3, 3])\n\n num_disc_nodes = gd.subset_num_nodes['state_disc']\n num_col_nodes = gd.subset_num_nodes['col']\n\n self.p = om.Problem(model=om.Group())\n\n state_options = {'u': {'units': 'm', 'shape': (1,)},\n 'v': {'units': 'm', 'shape': (3, 2)}}\n\n ode_outputs = {'vehicle_cg': {'units': 'm', 'shape': (3,)}}\n\n indep_comp = om.IndepVarComp()\n self.p.model.add_subsystem('indep', indep_comp, promotes=['*'])\n\n indep_comp.add_output('state_disc:u',\n val=np.zeros((num_disc_nodes, 1)), units='m')\n\n indep_comp.add_output('state_disc:v',\n val=np.zeros((num_disc_nodes, 3, 2)), units='m')\n\n indep_comp.add_output('state_col:u',\n val=np.zeros((num_col_nodes, 1)), units='m')\n\n indep_comp.add_output('state_col:v',\n val=np.zeros((num_col_nodes, 3, 2)), units='m')\n\n indep_comp.add_output('ode_disc:cg',\n val=np.zeros((num_disc_nodes, 3)), units='m')\n\n indep_comp.add_output('ode_col:cg',\n val=np.zeros((num_col_nodes, 3)), units='m')\n\n glic = self.p.model.add_subsystem('interleave_comp',\n subsys=GaussLobattoInterleaveComp(grid_data=gd))\n\n glic.add_var('u', **state_options['u'])\n glic.add_var('v', **state_options['v'])\n glic.add_var('vehicle_cg', **ode_outputs['vehicle_cg'])\n\n self.p.model.connect('state_disc:u', 'interleave_comp.disc_values:u')\n self.p.model.connect('state_disc:v', 'interleave_comp.disc_values:v')\n self.p.model.connect('state_col:u', 'interleave_comp.col_values:u')\n self.p.model.connect('state_col:v', 'interleave_comp.col_values:v')\n\n self.p.model.connect('ode_disc:cg', 'interleave_comp.disc_values:vehicle_cg')\n self.p.model.connect('ode_col:cg', 'interleave_comp.col_values:vehicle_cg')\n\n self.p.setup(force_alloc_complex=True)\n\n self.p['state_disc:u'] = np.random.random((num_disc_nodes, 1))\n self.p['state_disc:v'] = np.random.random((num_disc_nodes, 3, 2))\n self.p['state_col:u'] = np.random.random((num_col_nodes, 1))\n self.p['state_col:v'] = np.random.random((num_col_nodes, 3, 2))\n\n self.p.run_model()\n\n def tearDown(self):\n dm.options['include_check_partials'] = False\n\n def test_results(self):\n\n u_disc = self.p.get_val('state_disc:u')\n v_disc = self.p.get_val('state_disc:v')\n u_col = self.p.get_val('state_col:u')\n v_col = self.p.get_val('state_col:v')\n\n u_all = self.p.get_val('interleave_comp.all_values:u')\n v_all = self.p.get_val('interleave_comp.all_values:v')\n\n assert_near_equal(u_all[self.grid_data.subset_node_indices['state_disc'], ...],\n u_disc)\n\n assert_near_equal(v_all[self.grid_data.subset_node_indices['state_disc'], ...],\n v_disc)\n\n assert_near_equal(u_all[self.grid_data.subset_node_indices['col'], ...],\n u_col)\n\n assert_near_equal(v_all[self.grid_data.subset_node_indices['col'], ...],\n v_col)\n\n def test_partials(self):\n cpd = self.p.check_partials(compact_print=True, method='cs', out_stream=None)\n assert_check_partials(cpd)\n\n\nif __name__ == '__main__': # pragma: no cover\n 
unittest.main()\n",
"import numpy as np\nimport openmdao.api as om\n\nfrom ..grid_data import GridData\nfrom ...utils.misc import get_rate_units\nfrom ...options import options as dymos_options\n\n\nclass ContinuityCompBase(om.ExplicitComponent):\n \"\"\"\n ContinuityComp defines constraints to ensure continuity between adjacent segments.\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._no_check_partials = not dymos_options['include_check_partials']\n\n def initialize(self):\n\n self.options.declare('grid_data', types=GridData,\n desc='Container object for grid info')\n\n self.options.declare('state_options', types=dict,\n desc='Dictionary of state names/options for the phase')\n\n self.options.declare('control_options', types=dict,\n desc='Dictionary of control names/options for the phase')\n\n self.options.declare('time_units', default=None, allow_none=True, types=str,\n desc='Units of the integration variable')\n\n def _configure_state_continuity(self):\n state_options = self.options['state_options']\n num_segend_nodes = self.options['grid_data'].subset_num_nodes['segment_ends']\n num_segments = self.options['grid_data'].num_segments\n compressed = self.options['grid_data'].compressed\n\n if num_segments <= 1 or compressed:\n return\n\n for state_name, options in state_options.items():\n shape = options['shape'] if options['shape'] is not None else (1, )\n size = np.prod(shape)\n units = options['units']\n\n self.name_maps[state_name] = {}\n\n self.name_maps[state_name]['value_names'] = \\\n ('states:{0}'.format(state_name),\n 'defect_states:{0}'.format(state_name))\n\n self.add_input(name='states:{0}'.format(state_name),\n shape=(num_segend_nodes,) + shape,\n desc='Values of state {0} at discretization nodes'.format(\n state_name),\n units=units)\n\n self.add_output(\n name='defect_states:{0}'.format(state_name),\n shape=(num_segments - 1,) + shape,\n desc='Consistency constraint values for state {0}'.format(state_name),\n units=units)\n\n rs_size1 = np.repeat(np.arange(num_segments - 1, dtype=int), 2)\n cs_size1 = np.arange(1, num_segend_nodes - 1, dtype=int)\n\n template = np.zeros((num_segments - 1, num_segend_nodes))\n template[rs_size1, cs_size1] = 1.0\n template = np.kron(template, np.eye(size))\n rs, cs = template.nonzero()\n\n vals = np.zeros(len(rs), dtype=float)\n vals[0::2] = -1.0\n vals[1::2] = 1.0\n\n self.declare_partials(\n 'defect_states:{0}'.format(state_name),\n 'states:{0}'.format(state_name),\n val=vals, rows=rs, cols=cs,\n )\n\n def _configure_control_continuity(self):\n control_options = self.options['control_options']\n num_segend_nodes = self.options['grid_data'].subset_num_nodes['segment_ends']\n num_segments = self.options['grid_data'].num_segments\n time_units = self.options['time_units']\n\n if num_segments <= 1:\n # Control value and rate continuity is enforced even with compressed transcription\n return\n\n self.add_input('t_duration', units=time_units, val=1.0,\n desc='time duration of the phase')\n\n for control_name, options in control_options.items():\n shape = options['shape']\n size = np.prod(shape)\n units = options['units']\n rate_units = get_rate_units(units, time_units, deriv=1)\n rate2_units = get_rate_units(units, time_units, deriv=2)\n\n # Define the sparsity pattern for rate and rate2 continuity\n rs_size1 = np.repeat(np.arange(num_segments - 1, dtype=int), 2)\n cs_size1 = np.arange(1, num_segend_nodes - 1, dtype=int)\n\n template = np.zeros((num_segments - 1, num_segend_nodes))\n template[rs_size1, cs_size1] = 1.0\n template = 
np.kron(template, np.eye(size))\n rs, cs = template.nonzero()\n\n vals = np.zeros(len(rs), dtype=float)\n vals[0::2] = -1.0\n vals[1::2] = 1.0\n self.rate_jac_templates[control_name] = vals\n\n #\n # Setup value continuity\n #\n self.name_maps[control_name] = {}\n\n self.name_maps[control_name]['value_names'] = \\\n ('controls:{0}'.format(control_name),\n 'defect_controls:{0}'.format(control_name))\n\n self.name_maps[control_name]['rate_names'] = \\\n ('control_rates:{0}_rate'.format(control_name),\n 'defect_control_rates:{0}_rate'.format(control_name))\n\n self.name_maps[control_name]['rate2_names'] = \\\n ('control_rates:{0}_rate2'.format(control_name),\n 'defect_control_rates:{0}_rate2'.format(control_name))\n\n self.add_input(\n name='controls:{0}'.format(control_name),\n shape=(num_segend_nodes,) + shape,\n desc='Values of control {0} at discretization nodes'.format(control_name),\n units=units)\n\n self.add_output(\n name='defect_controls:{0}'.format(control_name),\n val=5*np.ones((num_segments - 1,) + shape),\n desc='Continuity constraint values for control {0}'.format(control_name),\n units=units)\n\n self.declare_partials(\n 'defect_controls:{0}'.format(control_name),\n 'controls:{0}'.format(control_name),\n val=vals, rows=rs, cols=cs,\n )\n\n #\n # Setup first derivative continuity\n #\n\n self.add_input(\n name='control_rates:{0}_rate'.format(control_name),\n shape=(num_segend_nodes,) + shape,\n desc='Values of control {0} derivative at '\n 'discretization nodes'.format(control_name),\n units=rate_units)\n\n self.add_output(\n name='defect_control_rates:{0}_rate'.format(control_name),\n shape=(num_segments - 1,) + shape,\n desc='Consistency constraint values for '\n 'control {0} derivative'.format(control_name),\n units=rate_units)\n\n self.declare_partials(\n 'defect_control_rates:{0}_rate'.format(control_name),\n 'control_rates:{0}_rate'.format(control_name),\n rows=rs, cols=cs,\n )\n\n self.declare_partials(\n 'defect_control_rates:{0}_rate'.format(control_name),\n 't_duration', dependent=True,\n )\n\n #\n # Setup second derivative continuity\n #\n\n self.add_input(\n name='control_rates:{0}_rate2'.format(control_name),\n shape=(num_segend_nodes,) + shape,\n desc='Values of control {0} second derivative '\n 'at discretization nodes'.format(control_name),\n units=rate2_units)\n\n self.add_output(\n name='defect_control_rates:{0}_rate2'.format(control_name),\n shape=(num_segments - 1,) + shape,\n desc='Consistency constraint values for control '\n '{0} second derivative'.format(control_name),\n units=rate2_units)\n\n self.declare_partials(\n 'defect_control_rates:{0}_rate2'.format(control_name),\n 'control_rates:{0}_rate2'.format(control_name),\n rows=rs, cols=cs,\n )\n\n self.declare_partials(\n 'defect_control_rates:{0}_rate2'.format(control_name),\n 't_duration', dependent=True\n )\n\n def configure_io(self):\n \"\"\"\n I/O creation is delayed until configure so that we can determine the shape and units for\n the states.\n \"\"\"\n self.rate_jac_templates = {}\n self.name_maps = {}\n\n self._configure_state_continuity()\n self._configure_control_continuity()\n\n def _compute_state_continuity(self, inputs, outputs):\n state_options = self.options['state_options']\n num_segments = self.options['grid_data'].num_segments\n compressed = self.options['grid_data'].compressed\n\n if num_segments <= 1 or compressed:\n return\n\n for state_name, options in state_options.items():\n input_name, output_name = self.name_maps[state_name]['value_names']\n end_vals = 
inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n outputs[output_name] = start_vals - end_vals\n\n def _compute_control_continuity(self, inputs, outputs):\n control_options = self.options['control_options']\n\n dt_dptau = inputs['t_duration'] / 2.0\n\n for name, options in control_options.items():\n input_name, output_name = self.name_maps[name]['value_names']\n end_vals = inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n outputs[output_name] = start_vals - end_vals\n\n input_name, output_name = self.name_maps[name]['rate_names']\n end_vals = inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n outputs[output_name] = (start_vals - end_vals) * dt_dptau\n\n input_name, output_name = self.name_maps[name]['rate2_names']\n end_vals = inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n outputs[output_name] = (start_vals - end_vals) * dt_dptau ** 2\n\n def compute(self, inputs, outputs):\n self._compute_state_continuity(inputs, outputs)\n self._compute_control_continuity(inputs, outputs)\n\n def compute_partials(self, inputs, partials):\n\n control_options = self.options['control_options']\n dt_dptau = 0.5 * inputs['t_duration']\n\n for control_name, options in control_options.items():\n input_name, output_name = self.name_maps[control_name]['rate_names']\n val = self.rate_jac_templates[control_name]\n partials[output_name, input_name] = val * dt_dptau\n\n end_vals = inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n\n partials[output_name, 't_duration'] = 0.5 * (start_vals - end_vals)\n\n input_name, output_name = self.name_maps[control_name]['rate2_names']\n val = self.rate_jac_templates[control_name]\n partials[output_name, input_name] = val * dt_dptau**2\n\n end_vals = inputs[input_name][1:-1:2, ...]\n start_vals = inputs[input_name][2:-1:2, ...]\n\n partials[output_name, 't_duration'] = (start_vals - end_vals) * dt_dptau\n\n\nclass GaussLobattoContinuityComp(ContinuityCompBase):\n \"\"\"\n ContinuityComp defines constraints to ensure continuity between adjacent segments.\n \"\"\"\n def _configure_state_continuity(self):\n state_options = self.options['state_options']\n num_segments = self.options['grid_data'].num_segments\n compressed = self.options['grid_data'].compressed\n\n if num_segments <= 1:\n return\n\n super(GaussLobattoContinuityComp, self)._configure_state_continuity()\n\n for state_name, options in state_options.items():\n if options['continuity'] and not compressed:\n\n # linear if states are optimized, because they are dvs.\n # but nonlinear if solve_segments, because its like multiple shooting\n is_linear = not options['solve_segments']\n self.add_constraint(name='defect_states:{0}'.format(state_name),\n equals=0.0, scaler=1.0, linear=is_linear)\n\n def _configure_control_continuity(self):\n control_options = self.options['control_options']\n num_segments = self.options['grid_data'].num_segments\n compressed = self.options['grid_data'].compressed\n\n if num_segments <= 1:\n # Control value and rate continuity is enforced even with compressed transcription\n return\n\n super(GaussLobattoContinuityComp, self)._configure_control_continuity()\n\n for control_name, options in control_options.items():\n\n if options['continuity'] and not compressed:\n self.add_constraint(name='defect_controls:{0}'.format(control_name),\n equals=0.0, scaler=1.0, linear=True)\n\n #\n # Setup first derivative continuity\n #\n\n if 
options['rate_continuity']:\n self.add_constraint(name='defect_control_rates:{0}_rate'.format(control_name),\n equals=0.0, scaler=options['rate_continuity_scaler'],\n linear=False)\n\n #\n # Setup second derivative continuity\n #\n\n if options['rate2_continuity']:\n self.add_constraint(name='defect_control_rates:{0}_rate2'.format(control_name),\n equals=0.0, scaler=options['rate2_continuity_scaler'],\n linear=False)\n\n\nclass RadauPSContinuityComp(ContinuityCompBase):\n \"\"\"\n ContinuityComp defines constraints to ensure continuity between adjacent segments.\n \"\"\"\n def _configure_state_continuity(self):\n state_options = self.options['state_options']\n num_segments = self.options['grid_data'].num_segments\n compressed = self.options['grid_data'].compressed\n\n if num_segments <= 1:\n return\n\n super(RadauPSContinuityComp, self)._configure_state_continuity()\n\n for state_name, options in state_options.items():\n if options['continuity'] and not compressed:\n # linear if states are optimized, because they are dvs.\n # but nonlinear if solve_segments, because its like multiple shooting\n is_linear = not options['solve_segments']\n\n self.add_constraint(name='defect_states:{0}'.format(state_name),\n equals=0.0, scaler=1.0, linear=is_linear)\n\n def _configure_control_continuity(self):\n control_options = self.options['control_options']\n num_segments = self.options['grid_data'].num_segments\n\n if num_segments <= 1:\n # Control value and rate continuity is enforced even with compressed transcription\n return\n\n super(RadauPSContinuityComp, self)._configure_control_continuity()\n\n for control_name, options in control_options.items():\n if options['continuity']:\n self.add_constraint(name='defect_controls:{0}'.format(control_name),\n equals=0.0, scaler=1.0, linear=False)\n\n #\n # Setup first derivative continuity\n #\n\n if options['rate_continuity']:\n self.add_constraint(name='defect_control_rates:{0}_rate'.format(control_name),\n equals=0.0, scaler=options['rate_continuity_scaler'],\n linear=False)\n\n #\n # Setup second derivative continuity\n #\n\n if options['rate2_continuity']:\n self.add_constraint(name='defect_control_rates:{0}_rate2'.format(control_name),\n equals=0.0, scaler=options['rate2_continuity_scaler'],\n linear=False)\n"
] |
[
[
"numpy.arange",
"numpy.repeat",
"numpy.prod",
"numpy.ones"
],
[
"numpy.array",
"numpy.random.random",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.prod",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
noveens/sampling_cf
|
[
"e135819b1e7310ee58edbbd138f303e5240a2619"
] |
[
"pytorch_models/NeuMF.py"
] |
[
"import torch\nimport torch.nn as nn\n\nfrom pytorch_models.MF import BaseMF\n\nclass GMF(BaseMF):\n def __init__(self, hyper_params):\n super(GMF, self).__init__(hyper_params)\n \n self.final = nn.Linear(hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # Embed Latent space\n user = self.dropout(self.user_embedding(user_id.view(-1)))\n item = self.dropout(self.item_embedding(item_id.view(-1)))\n joint = user * item\n rating = self.final(joint)[:, 0].view(user_id.shape) # [bsz]\n return user_bias + item_bias + self.global_bias + rating\n\nclass MLP(BaseMF):\n def __init__(self, hyper_params):\n super(MLP, self).__init__(hyper_params)\n\n self.project = nn.Sequential(\n nn.Dropout(hyper_params['dropout']),\n nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),\n nn.ReLU(),\n nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])\n )\n self.final = nn.Linear(hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # Embed Latent space\n user = self.dropout(self.user_embedding(user_id.view(-1)))\n item = self.dropout(self.item_embedding(item_id.view(-1)))\n \n joint = torch.cat([ user, item ], dim = -1)\n joint = self.project(joint)\n rating = self.final(joint)[:, 0].view(user_id.shape)\n return user_bias + item_bias + self.global_bias + rating\n\nclass NeuMF(BaseMF):\n def __init__(self, hyper_params):\n super(NeuMF, self).__init__(hyper_params, keep_gamma = False)\n \n self.gmf_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])\n self.gmf_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])\n\n self.mlp_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])\n self.mlp_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])\n\n self.project = nn.Sequential(\n nn.Dropout(hyper_params['dropout']),\n nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),\n nn.ReLU(),\n nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])\n )\n self.final = nn.Linear(2 * hyper_params['latent_size'], 1)\n self.dropout = nn.Dropout(hyper_params['dropout'])\n\n def init(self, gmf_model, mlp_model):\n with torch.no_grad():\n self.gmf_user_embedding.weight.data = gmf_model.user_embedding.weight.data\n self.gmf_item_embedding.weight.data = gmf_model.item_embedding.weight.data\n\n self.mlp_user_embedding.weight.data = mlp_model.user_embedding.weight.data\n self.mlp_item_embedding.weight.data = mlp_model.item_embedding.weight.data\n\n for i in range(len(self.project)): \n try:\n self.project[i].weight.data = mlp_model.project[i].weight.data\n self.project[i].bias.data = mlp_model.project[i].bias.data\n except: pass\n\n self.final.weight.data = torch.cat([ gmf_model.final.weight.data, mlp_model.final.weight.data ], dim = -1)\n self.final.bias.data = 0.5 * (gmf_model.final.bias.data + mlp_model.final.bias.data)\n\n self.user_bias.data = 0.5 * (gmf_model.user_bias.data + mlp_model.user_bias.data)\n self.item_bias.data = 0.5 * 
(gmf_model.item_bias.data + mlp_model.item_bias.data)\n\n def get_score(self, user_id, item_id):\n # For the FM\n user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)\n item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)\n\n # GMF Part\n user = self.dropout(self.gmf_user_embedding(user_id.view(-1))) # [bsz x 32]\n item = self.dropout(self.gmf_item_embedding(item_id.view(-1))) # [bsz x 32]\n gmf_joint = user * item\n\n # MLP Part\n user = self.dropout(self.mlp_user_embedding(user_id.view(-1))) # [bsz x 32]\n item = self.dropout(self.mlp_item_embedding(item_id.view(-1))) # [bsz x 32]\n mlp_joint = torch.cat([ user, item ], dim = -1)\n mlp_joint = self.project(mlp_joint)\n\n # NeuMF\n final = torch.cat([ gmf_joint, mlp_joint ], dim = -1)\n rating = self.final(final)[:, 0].view(user_id.shape) # [bsz]\n\n return user_bias + item_bias + self.global_bias + rating\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ostodieck/sharpy
|
[
"aed86428ff88fd14d36cabd91cf7e04b5fc9a39a",
"aed86428ff88fd14d36cabd91cf7e04b5fc9a39a"
] |
[
"tests/coupled/static/smith_g_4deg/generate_smith_g_4deg.py",
"sharpy/postproc/stallcheck.py"
] |
[
"import h5py as h5\nimport numpy as np\nimport configparser\nimport os\n\nimport sharpy.utils.algebra as algebra\n\ncase_name = 'smith_g_4deg'\nroute = os.path.dirname(os.path.realpath(__file__)) + '/'\n\n# flight conditions\nu_inf = 25\nrho = 0.08891\nalpha = 4\nbeta = 0\nc_ref = 1\nb_ref = 16\nsweep = 0*np.pi/180.\naspect_ratio = 32 # = total wing span (chord = 1)\n\nalpha_rad = alpha*np.pi/180\n\n# main geometry data\nmain_span = aspect_ratio/2./np.cos(sweep)\nmain_chord = 1.0\nmain_ea = 0.5\nmain_sigma = 1\nmain_airfoil_P = 0\nmain_airfoil_M = 0\n\nn_surfaces = 2\n\n# discretisation data\nnum_elem_main = 10\n\nnum_node_elem = 3\nnum_elem = num_elem_main + num_elem_main\nnum_node_main = num_elem_main*(num_node_elem - 1) + 1\nnum_node = num_node_main + (num_node_main - 1)\n\nm_main = 10\n\n\ndef clean_test_files():\n fem_file_name = route + '/' + case_name + '.fem.h5'\n if os.path.isfile(fem_file_name):\n os.remove(fem_file_name)\n\n aero_file_name = route + '/' + case_name + '.aero.h5'\n if os.path.isfile(aero_file_name):\n os.remove(aero_file_name)\n\n solver_file_name = route + '/' + case_name + '.sharpy'\n if os.path.isfile(solver_file_name):\n os.remove(solver_file_name)\n\n flightcon_file_name = route + '/' + case_name + '.flightcon.txt'\n if os.path.isfile(flightcon_file_name):\n os.remove(flightcon_file_name)\n\n\ndef generate_fem_file():\n # placeholders\n # coordinates\n global x, y, z\n x = np.zeros((num_node, ))\n y = np.zeros((num_node, ))\n z = np.zeros((num_node, ))\n # struct twist\n structural_twist = np.zeros((num_elem, num_node_elem))\n # beam number\n beam_number = np.zeros((num_elem, ), dtype=int)\n # frame of reference delta\n frame_of_reference_delta = np.zeros((num_elem, num_node_elem, 3))\n # connectivities\n conn = np.zeros((num_elem, num_node_elem), dtype=int)\n # stiffness\n num_stiffness = 1\n ea = 1e5\n ga = 1e5\n gj = 1e4\n eiy = 2e4\n eiz = 5e6\n sigma = 1.\n base_stiffness = sigma*np.diag([ea, ga, ga, gj, eiy, eiz])\n stiffness = np.zeros((num_stiffness, 6, 6))\n stiffness[0, :, :] = main_sigma*base_stiffness\n elem_stiffness = np.zeros((num_elem,), dtype=int)\n # mass\n num_mass = 1\n m_base = 0.75\n j_base = 0.1\n base_mass = np.diag([m_base, m_base, m_base, j_base, j_base, j_base])\n mass = np.zeros((num_mass, 6, 6))\n mass[0, :, :] = base_mass\n elem_mass = np.zeros((num_elem,), dtype=int)\n # boundary conditions\n boundary_conditions = np.zeros((num_node, ), dtype=int)\n boundary_conditions[0] = 1\n # applied forces\n # n_app_forces = 2\n # node_app_forces = np.zeros((n_app_forces,), dtype=int)\n app_forces = np.zeros((num_node, 6))\n\n spacing_param = 4\n\n # right wing (beam 0) --------------------------------------------------------------\n working_elem = 0\n working_node = 0\n beam_number[working_elem:working_elem + num_elem_main] = 0\n domain = np.linspace(0, 1.0, num_node_main)\n # 16 - (np.geomspace(20, 4, 10) - 4)\n # x[working_node:working_node + num_node_main] = np.sin(sweep)*(main_span - (np.geomspace(main_span + spacing_param,\n # 0 + spacing_param,\n # num_node_main)\n # - spacing_param))\n # y[working_node:working_node + num_node_main] = np.abs(np.cos(sweep)*(main_span - (np.geomspace(main_span + spacing_param,\n # 0 + spacing_param,\n # num_node_main)\n # - spacing_param)))\n y[0] = 0\n y[working_node:working_node + num_node_main] = np.cos(sweep)*np.linspace(0.0, main_span, num_node_main)\n x[working_node:working_node + num_node_main] = np.sin(sweep)*np.linspace(0.0, main_span, num_node_main)\n for ielem in range(num_elem_main):\n for 
inode in range(num_node_elem):\n frame_of_reference_delta[working_elem + ielem, inode, :] = [-1, 0, 0]\n # connectivity\n for ielem in range(num_elem_main):\n conn[working_elem + ielem, :] = ((np.ones((3,))*(working_elem + ielem)*(num_node_elem - 1)) +\n [0, 2, 1])\n elem_stiffness[working_elem:working_elem + num_elem_main] = 0\n elem_mass[working_elem:working_elem + num_elem_main] = 0\n boundary_conditions[0] = 1\n boundary_conditions[working_node + num_node_main - 1] = -1\n working_elem += num_elem_main\n working_node += num_node_main\n\n # left wing (beam 1) --------------------------------------------------------------\n beam_number[working_elem:working_elem + num_elem_main] = 1\n domain = np.linspace(-1.0, 0.0, num_node_main)\n tempy = np.linspace(-main_span, 0.0, num_node_main)\n x[working_node:working_node + num_node_main - 1] = -np.sin(sweep)*tempy[0:-1]\n y[working_node:working_node + num_node_main - 1] = np.cos(sweep)*tempy[0:-1]\n # x[working_node:working_node + num_node_main - 1] = -np.sin(sweep)*(main_span - (np.geomspace(0 + spacing_param,\n # main_span + spacing_param,\n # num_node_main)[:-1]\n # - spacing_param))\n # y[working_node:working_node + num_node_main - 1] = -np.abs(np.cos(sweep)*(main_span - (np.geomspace(0 + spacing_param,\n # main_span + spacing_param,\n # num_node_main)[:-1]\n # - spacing_param)))\n for ielem in range(num_elem_main):\n for inode in range(num_node_elem):\n frame_of_reference_delta[working_elem + ielem, inode, :] = [-1, 0, 0]\n # connectivity\n for ielem in range(num_elem_main):\n conn[working_elem + ielem, :] = ((np.ones((3,))*(working_elem + ielem)*(num_node_elem - 1)) +\n [0, 2, 1]) + 1\n conn[working_elem + num_elem_main - 1, 1] = 0\n elem_stiffness[working_elem:working_elem + num_elem_main] = 0\n elem_mass[working_elem:working_elem + num_elem_main] = 0\n boundary_conditions[working_node] = -1\n working_elem += num_elem_main\n working_node += num_node_main - 1\n\n with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:\n coordinates = h5file.create_dataset('coordinates', data=np.column_stack((x, y, z)))\n conectivities = h5file.create_dataset('connectivities', data=conn)\n num_nodes_elem_handle = h5file.create_dataset(\n 'num_node_elem', data=num_node_elem)\n num_nodes_handle = h5file.create_dataset(\n 'num_node', data=num_node)\n num_elem_handle = h5file.create_dataset(\n 'num_elem', data=num_elem)\n stiffness_db_handle = h5file.create_dataset(\n 'stiffness_db', data=stiffness)\n stiffness_handle = h5file.create_dataset(\n 'elem_stiffness', data=elem_stiffness)\n mass_db_handle = h5file.create_dataset(\n 'mass_db', data=mass)\n mass_handle = h5file.create_dataset(\n 'elem_mass', data=elem_mass)\n frame_of_reference_delta_handle = h5file.create_dataset(\n 'frame_of_reference_delta', data=frame_of_reference_delta)\n structural_twist_handle = h5file.create_dataset(\n 'structural_twist', data=structural_twist)\n bocos_handle = h5file.create_dataset(\n 'boundary_conditions', data=boundary_conditions)\n beam_handle = h5file.create_dataset(\n 'beam_number', data=beam_number)\n app_forces_handle = h5file.create_dataset(\n 'app_forces', data=app_forces)\n # node_app_forces_handle = h5file.create_dataset(\n # 'node_app_forces', data=node_app_forces)\n\n\ndef generate_aero_file():\n global x, y, z\n airfoil_distribution = np.zeros((num_elem, num_node_elem), dtype=int)\n surface_distribution = np.zeros((num_elem,), dtype=int) - 1\n surface_m = np.zeros((n_surfaces, ), dtype=int)\n m_distribution = 'uniform'\n aero_node = np.zeros((num_node,), 
dtype=bool)\n twist = np.zeros((num_elem, 3))\n chord = np.zeros((num_elem, 3))\n elastic_axis = np.zeros((num_elem, 3,))\n\n working_elem = 0\n working_node = 0\n # right wing (surface 0, beam 0)\n i_surf = 0\n airfoil_distribution[working_elem:working_elem + num_elem_main, :] = 0\n surface_distribution[working_elem:working_elem + num_elem_main] = i_surf\n surface_m[i_surf] = m_main\n aero_node[working_node:working_node + num_node_main] = True\n chord[:] = main_chord\n elastic_axis[:] = main_ea\n working_elem += num_elem_main\n working_node += num_node_main\n\n # left wing (surface 1, beam 1)\n i_surf = 1\n airfoil_distribution[working_elem:working_elem + num_elem_main, :] = 0\n surface_distribution[working_elem:working_elem + num_elem_main] = i_surf\n surface_m[i_surf] = m_main\n aero_node[working_node:working_node + num_node_main - 1] = True\n # chord[working_node:working_node + num_node_main - 1] = main_chord\n # elastic_axis[working_node:working_node + num_node_main - 1] = main_ea\n working_elem += num_elem_main\n working_node += num_node_main - 1\n\n with h5.File(route + '/' + case_name + '.aero.h5', 'a') as h5file:\n airfoils_group = h5file.create_group('airfoils')\n # add one airfoil\n naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(\n generate_naca_camber(P=main_airfoil_P, M=main_airfoil_M)))\n # chord\n chord_input = h5file.create_dataset('chord', data=chord)\n dim_attr = chord_input .attrs['units'] = 'm'\n\n # twist\n twist_input = h5file.create_dataset('twist', data=twist)\n dim_attr = twist_input.attrs['units'] = 'rad'\n\n # airfoil distribution\n airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)\n\n surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)\n surface_m_input = h5file.create_dataset('surface_m', data=surface_m)\n m_distribution_input = h5file.create_dataset('m_distribution', data=m_distribution.encode('ascii', 'ignore'))\n\n aero_node_input = h5file.create_dataset('aero_node', data=aero_node)\n elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)\n\n\ndef generate_naca_camber(M=0, P=0):\n m = M*1e-2\n p = P*1e-1\n\n def naca(x, m, p):\n if x < 1e-6:\n return 0.0\n elif x < p:\n return m/(p*p)*(2*p*x - x*x)\n elif x > p and x < 1+1e-6:\n return m/((1-p)*(1-p))*(1 - 2*p + 2*p*x - x*x)\n\n x_vec = np.linspace(0, 1, 1000)\n y_vec = np.array([naca(x, m, p) for x in x_vec])\n return x_vec, y_vec\n\n\ndef generate_solver_file(horseshoe=False):\n file_name = route + '/' + case_name + '.sharpy'\n # config = configparser.ConfigParser()\n import configobj\n config = configobj.ConfigObj()\n config.filename = file_name\n config['SHARPy'] = {'case': case_name,\n 'route': route,\n 'flow': ['BeamLoader', 'AerogridLoader', 'StaticCoupled', 'AerogridPlot', 'BeamPlot', 'AeroForcesCalculator', 'WriteVariablesTime'],\n # 'flow': ['BeamLoader', 'NonLinearStatic', 'BeamPlot'],\n 'write_screen': 'off',\n 'write_log': 'on',\n 'log_folder': route + '/output/',\n 'log_file': case_name + '.log'}\n config['BeamLoader'] = {'unsteady': 'off',\n 'orientation': algebra.euler2quat(np.array([0.0,\n alpha_rad,\n beta*np.pi/180]))}\n config['StaticCoupled'] = {'print_info': 'on',\n 'structural_solver': 'NonLinearStatic',\n 'structural_solver_settings': {'print_info': 'off',\n 'max_iterations': 150,\n 'num_load_steps': 1,\n 'delta_curved': 1e-5,\n 'min_delta': 1e-8,\n 'gravity_on': 'on',\n 'gravity': 9.754},\n 'aero_solver': 'StaticUvlm',\n 
'aero_solver_settings': {'print_info': 'off',\n 'horseshoe': 'on',\n 'num_cores': 4,\n 'n_rollup': 100,\n 'rollup_dt': main_chord/m_main/u_inf,\n 'rollup_aic_refresh': 1,\n 'rollup_tolerance': 1e-4,\n 'velocity_field_generator': 'SteadyVelocityField',\n 'velocity_field_input': {'u_inf': u_inf,\n 'u_inf_direction': [1., 0, 0]},\n 'rho': rho,\n 'alpha': alpha_rad,\n 'beta': beta},\n 'max_iter': 100,\n 'n_load_steps': 5,\n 'tolerance': 1e-5,\n 'relaxation_factor': 0.}\n config['WriteVariablesTime'] = {'cleanup_old_solution': 'on',\n 'folder': route + '/output/',\n 'structure_variables': ['pos'],\n 'structure_nodes': [num_node_main - 1]}\n\n if horseshoe is True:\n config['AerogridLoader'] = {'unsteady': 'off',\n 'aligned_grid': 'on',\n 'mstar': 1,\n 'freestream_dir': ['1', '0', '0']}\n else:\n config['AerogridLoader'] = {'unsteady': 'off',\n 'aligned_grid': 'on',\n 'mstar': 80,\n 'freestream_dir': ['1', '0', '0']}\n config['AerogridPlot'] = {'folder': route + '/output/',\n 'include_rbm': 'off',\n 'include_applied_forces': 'on',\n 'minus_m_star': 0\n }\n config['AeroForcesCalculator'] = {'folder': route + '/output/forces',\n 'write_text_file': 'on',\n 'text_file_name': case_name + '_aeroforces.csv',\n 'screen_output': 'on',\n 'unsteady': 'off'\n }\n config['BeamPlot'] = {'folder': route + '/output/',\n 'include_rbm': 'off',\n 'include_applied_forces': 'on'}\n config.write()\n\n\nclean_test_files()\ngenerate_fem_file()\ngenerate_solver_file(horseshoe=True)\ngenerate_aero_file()\n",
"import os\n\nimport numpy as np\nfrom tvtk.api import tvtk, write_data\n\nimport sharpy.utils.algebra as algebra\nimport sharpy.utils.cout_utils as cout\nfrom sharpy.utils.settings import str2bool\nfrom sharpy.utils.solver_interface import solver, BaseSolver\nimport sharpy.utils.settings as settings\nfrom sharpy.utils.datastructures import init_matrix_structure, standalone_ctypes_pointer\nimport sharpy.aero.utils.uvlmlib as uvlmlib\n\n\n@solver\nclass StallCheck(BaseSolver):\n \"\"\"\n Outputs the incidence angle of every panel of the surface.\n \"\"\"\n solver_id = 'StallCheck'\n solver_classification = 'post-processor'\n\n settings_types = dict()\n settings_default = dict()\n settings_description = dict()\n\n settings_types['print_info'] = 'bool'\n settings_default['print_info'] = True\n settings_description['print_info'] = 'Print info to screen '\n\n settings_types['airfoil_stall_angles'] = 'dict'\n settings_default['airfoil_stall_angles'] = dict()\n settings_description['airfoil_stall_angles'] = 'Dictionary of stall angles for each airfoil'\n\n settings_types['output_degrees'] = 'bool'\n settings_default['output_degrees'] = False\n settings_description['output_degrees'] = 'Output incidence angles in degrees vs radians'\n\n settings_table = settings.SettingsTable()\n __doc__ += settings_table.generate(settings_types, settings_default, settings_description)\n\n def __init__(self):\n\n self.settings = None\n self.data = None\n\n self.ts_max = None\n self.ts = None\n\n def initialise(self, data, custom_settings=None):\n self.data = data\n if custom_settings is None:\n self.settings = data.settings[self.solver_id]\n else:\n self.settings = custom_settings\n settings.to_custom_types(self.settings, self.settings_types, self.settings_default)\n self.ts_max = len(self.data.structure.timestep_info)\n\n def run(self, online=False):\n if not online:\n for self.ts in range(self.ts_max):\n self.check_stall()\n cout.cout_wrap('...Finished', 1)\n else:\n self.ts = len(self.data.structure.timestep_info) - 1\n self.check_stall()\n return self.data\n\n def check_stall(self):\n # add entry to dictionary for postproc\n tstep = self.data.aero.timestep_info[self.ts]\n tstep.postproc_cell['incidence_angle'] = init_matrix_structure(dimensions=tstep.dimensions,\n with_dim_dimension=False)\n\n # create ctypes pointers\n tstep.postproc_cell['incidence_angle_ct_list'] = None\n tstep.postproc_cell['incidence_angle_ct_pointer'] = None\n tstep.postproc_cell['incidence_angle_ct_list'], tstep.postproc_cell['incidence_angle_ct_pointer'] = \\\n standalone_ctypes_pointer(tstep.postproc_cell['incidence_angle'])\n\n # call calculate\n uvlmlib.uvlm_calculate_incidence_angle(self.data.aero.timestep_info[self.ts],\n self.data.structure.timestep_info[self.ts])\n\n # calculate ratio of stalled panels and print\n stalled_panels = False\n stalled_surfs = np.zeros((tstep.n_surf, ), dtype=int)\n added_panels = []\n for i_surf in range(tstep.n_surf):\n added_panels.append([])\n\n for i_elem in range(self.data.structure.num_elem):\n for i_local_node in range(self.data.structure.num_node_elem):\n airfoil_id = self.data.aero.aero_dict['airfoil_distribution'][i_elem, i_local_node]\n if self.settings['airfoil_stall_angles']:\n i_global_node = self.data.structure.connectivities[i_elem, i_local_node]\n for i_dict in self.data.aero.struct2aero_mapping[i_global_node]:\n i_surf = i_dict['i_surf']\n i_n = i_dict['i_n']\n\n if i_n in added_panels[i_surf]:\n continue\n\n if i_n == tstep.dimensions[i_surf][1]:\n continue\n\n limits = 
self.settings['airfoil_stall_angles'][str(airfoil_id)]\n if tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] < float(limits[0]):\n stalled_panels = True\n stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]\n elif tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] > float(limits[1]):\n stalled_panels = True\n stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]\n\n if stalled_panels:\n if self.settings['print_info']:\n cout.cout_wrap('Some panel has an incidence angle out of the linear region', 1)\n cout.cout_wrap('The number of stalled panels per surface id are:', 1)\n for i_surf in range(tstep.n_surf):\n cout.cout_wrap('\\ti_surf = ' + str(i_surf) + ': ' + str(stalled_surfs[i_surf]) + ' panels.', 1)\n # cout.cout_wrap('In total, the ratio of stalled panels is: ', str(stalled_surfs.sum()/))\n\n if self.settings['output_degrees']:\n for i_surf in range(tstep.n_surf):\n tstep.postproc_cell['incidence_angle'][i_surf] *= 180/np.pi\n\n"
] |
[
[
"numpy.diag",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.column_stack",
"numpy.array",
"numpy.zeros"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leeeeeeeee2/srgan
|
[
"608a5fa30f7039da11c18ad70f84f27755cfba6d"
] |
[
"models.py"
] |
[
"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torchvision.models import vgg19\nimport math\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self):\n super(FeatureExtractor, self).__init__()\n vgg19_model = vgg19(pretrained=True)\n self.feature_extractor = nn.Sequential(*list(vgg19_model.features.children())[:18])\n\n def forward(self, img):\n return self.feature_extractor(img)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_features):\n super(ResidualBlock, self).__init__()\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(in_features, 0.8),\n nn.PReLU(),\n nn.Conv2d(in_features, in_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(in_features, 0.8),\n )\n\n def forward(self, x):\n return x + self.conv_block(x)\n\n\nclass GeneratorResNet(nn.Module):\n def __init__(self, in_channels=3, out_channels=3, n_residual_blocks=16):\n super(GeneratorResNet, self).__init__()\n\n # First layer\n self.conv1 = nn.Sequential(nn.Conv2d(in_channels, 64, kernel_size=9, stride=1, padding=4), nn.PReLU())\n\n # Residual blocks\n res_blocks = []\n for _ in range(n_residual_blocks):\n res_blocks.append(ResidualBlock(64))\n self.res_blocks = nn.Sequential(*res_blocks)\n\n # Second conv layer post residual blocks\n self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64, 0.8))\n\n # Upsampling layers\n upsampling = []\n for out_features in range(2):\n upsampling += [\n # nn.Upsample(scale_factor=2),\n nn.Conv2d(64, 256, 3, 1, 1),\n nn.BatchNorm2d(256),\n nn.PixelShuffle(upscale_factor=2),\n nn.PReLU(),\n ]\n self.upsampling = nn.Sequential(*upsampling)\n\n # Final output layer\n self.conv3 = nn.Sequential(nn.Conv2d(64, out_channels, kernel_size=9, stride=1, padding=4), nn.Tanh())\n\n def forward(self, x):\n out1 = self.conv1(x)\n out = self.res_blocks(out1)\n out2 = self.conv2(out)\n out = torch.add(out1, out2)\n out = self.upsampling(out)\n out = self.conv3(out)\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape):\n super(Discriminator, self).__init__()\n\n self.input_shape = input_shape\n in_channels, in_height, in_width = self.input_shape\n patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)\n self.output_shape = (1, patch_h, patch_w)\n\n def discriminator_block(in_filters, out_filters, first_block=False):\n layers = []\n layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))\n if not first_block:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=3, stride=2, padding=1))\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels\n for i, out_filters in enumerate([64, 128, 256, 512]):\n layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))\n in_filters = out_filters\n\n layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, img):\n return self.model(img)"
] |
[
[
"torch.nn.Sequential",
"torch.add",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.PixelShuffle",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
moshelooks/incubator-mxnet
|
[
"5245ef68191a6d47594bf331ec6e20ba6e93ad4c"
] |
[
"example/onnx/super_resolution.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Testing super_resolution model conversion\"\"\"\nfrom __future__ import absolute_import as _abs\nfrom __future__ import print_function\nfrom collections import namedtuple\nimport logging\nimport numpy as np\nfrom PIL import Image\nimport mxnet as mx\nfrom mxnet.test_utils import download\nimport mxnet.contrib.onnx as onnx_mxnet\n\n# set up logger\nlogging.basicConfig()\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\n\ndef import_onnx():\n \"\"\"Import the onnx model into mxnet\"\"\"\n model_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx'\n download(model_url, 'super_resolution.onnx')\n\n LOGGER.info(\"Converting onnx format to mxnet's symbol and params...\")\n sym, arg_params, aux_params = onnx_mxnet.import_model('super_resolution.onnx')\n LOGGER.info(\"Successfully Converted onnx format to mxnet's symbol and params...\")\n return sym, arg_params, aux_params\n\ndef get_test_image():\n \"\"\"Download and process the test image\"\"\"\n # Load test image\n input_image_dim = 224\n img_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_res_input.jpg'\n download(img_url, 'super_res_input.jpg')\n img = Image.open('super_res_input.jpg').resize((input_image_dim, input_image_dim))\n img_ycbcr = img.convert(\"YCbCr\")\n img_y, img_cb, img_cr = img_ycbcr.split()\n input_image = np.array(img_y)[np.newaxis, np.newaxis, :, :]\n return input_image, img_cb, img_cr\n\ndef perform_inference(sym, arg_params, aux_params, input_img, img_cb, img_cr):\n \"\"\"Perform inference on image using mxnet\"\"\"\n # create module\n mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)\n mod.bind(for_training=False, data_shapes=[('input_0', input_img.shape)])\n mod.set_params(arg_params=arg_params, aux_params=aux_params)\n\n # run inference\n batch = namedtuple('Batch', ['data'])\n mod.forward(batch([mx.nd.array(input_img)]))\n\n # Save the result\n img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].\n asnumpy().clip(0, 255)), mode='L')\n\n result_img = Image.merge(\n \"YCbCr\", [img_out_y,\n img_cb.resize(img_out_y.size, Image.BICUBIC),\n img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert(\"RGB\")\n output_img_dim = 672\n assert result_img.size == (output_img_dim, output_img_dim)\n LOGGER.info(\"Super Resolution example success.\")\n result_img.save(\"super_res_output.jpg\")\n return result_img\n\nif __name__ == '__main__':\n MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM = import_onnx()\n INPUT_IMG, IMG_CB, IMG_CR = get_test_image()\n perform_inference(MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM, INPUT_IMG, IMG_CB, IMG_CR)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
almarklein/stentseg
|
[
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46",
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46",
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46",
"48255fffdc2394d1dc4ce2208c9a91e1d4c35a46"
] |
[
"lspeas/phantom/stats_alg_vs_cam123mean_error_2scanners.py",
"lspeas/phantom/plotting_result_error.py",
"lspeas/utils/curvature_helix_validation.py",
"stentseg/utils/fitting.py"
] |
[
"\"\"\" Read position errors from excel for statistical analysis\r\n\r\n\"\"\"\r\nimport os\r\nfrom stentseg.utils.datahandling import select_dir\r\nimport openpyxl # http://openpyxl.readthedocs.org/\r\nimport numpy as np\r\nfrom lspeas.utils.normality_statistics import paired_samples_ttest\r\n\r\n\r\ndef read_error_cam123(exceldir, workbook, profiles):\r\n \"\"\" read the absolute errors for 10 timepositions for all stent points\r\n \"\"\"\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n abs_errors_profiles = []\r\n for profile in profiles:\r\n sheet = wb.get_sheet_by_name(profile)\r\n abs_errors_profile = []\r\n for phaserow in range(8,18): # excel rows 21-30 when 20,30; rows 9-18 when 8,18\r\n abs_errors = sheet.rows[phaserow][1:] # skip first col with notes\r\n abs_errors = [obj.value for obj in abs_errors if obj.value is not None]\r\n abs_errors_profile.append(abs_errors)\r\n spread = np.concatenate([a for a in abs_errors_profile], axis=0)\r\n abs_errors_profiles.append(spread)\r\n \r\n return abs_errors_profiles\r\n\r\ndef read_ampl_errorcam123(exceldir, workbook, profile):\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n sheet = wb.get_sheet_by_name(profile)\r\n phaserow = 58 - 1 # 58 for 58; 60 for 60\r\n errors = sheet.rows[phaserow][1:] # skip first col with notes\r\n errors = [obj.value for obj in errors if obj.value is not None]\r\n return errors\r\n\r\n\r\nexceldir = select_dir(r'C:\\Users\\Maaike\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot',\r\n r'D:\\Profiles\\koenradesma\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot')\r\nworkbook = 'Errors cam123ref_vs_alg Toshiba.xlsx'\r\nworkbookF = 'Errors cam123ref_vs_alg Siemens.xlsx'\r\n\r\n## test over all 10 positions\r\nprof = 'ZA1'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA2'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA3'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZA6'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB1'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB2'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB3'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB4'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, 
workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB5'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\nprof = 'ZB6'\r\nabs_errors_T_x = read_error_cam123(exceldir, workbook, [prof])\r\nabs_errors_F_x = read_error_cam123(exceldir, workbookF, [prof])\r\n\r\nt2, p2 = paired_samples_ttest(abs_errors_T_x, abs_errors_F_x, prof)\r\n\r\n\r\n## for the amplitudes\r\nprint(\"******* Amplitude errors *********\")\r\n\r\nprof = 'ZA1'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA2'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA3'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZA6'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB1'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB2'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB3'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB4'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB5'\r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)\r\n\r\n\r\nprof = 'ZB6' \r\nerrors_T_x = read_ampl_errorcam123(exceldir, workbook, prof)\r\nerrors_F_x = read_ampl_errorcam123(exceldir, workbookF, prof)\r\n\r\nt2, p2 = paired_samples_ttest(errors_T_x, errors_F_x, prof, amplitude=True)",
"\"\"\" Plotting result of motion_pattern_error\r\nused for SPIE abstract\r\n\"\"\"\r\n\r\n\r\ndef read_error_ouput(exceldir, workbook, rowS=18, colS=1, colE=5):\r\n \"\"\"\r\n \"\"\"\r\n wb = openpyxl.load_workbook(os.path.join(exceldir, workbook), data_only=True)\r\n sheet = wb.get_sheet_by_name('summery')\r\n rowS = rowS\r\n colS = colS\r\n colE = colE\r\n mean_abs_error = sheet.rows[rowS][colS:colE]\r\n mean_abs_error = [obj.value for obj in mean_abs_error] \r\n SD = sheet.rows[rowS+1][colS:colE]\r\n SD = [obj.value for obj in SD] \r\n MIN = sheet.rows[rowS+2][colS:colE]\r\n MIN = [obj.value for obj in MIN] \r\n Q1 = sheet.rows[rowS+3][colS:colE]\r\n Q1 = [obj.value for obj in Q1] \r\n Q3 = sheet.rows[rowS+4][colS:colE]\r\n Q3 = [obj.value for obj in Q3] \r\n MAX = sheet.rows[rowS+5][colS:colE]\r\n MAX = [obj.value for obj in MAX]\r\n profiles = sheet.rows[7][colS:colE]\r\n profiles = [obj.value for obj in profiles]\r\n \r\n return profiles, mean_abs_error, SD, MIN, Q1, Q3, MAX \r\n \r\n\r\n\r\nimport os\r\nimport openpyxl\r\nimport matplotlib.pyplot as plt\r\nfrom stentseg.utils.datahandling import select_dir\r\n# import seaborn as sns #sns.tsplot\r\n# https://www.wakari.io/sharing/bundle/ijstokes/pyvis-1h?has_login=False\r\n# http://spartanideas.msu.edu/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/\r\n\r\nexceldir = select_dir(r'C:\\Users\\Maaike\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot',\r\n r'D:\\Profiles\\koenradesma\\Dropbox\\UTdrive\\LSPEAS\\Analysis\\Validation robot')\r\nworkbookErrors = 'Errors camera_algorithm Toshiba.xlsx'\r\ndirsave = select_dir(r'C:\\Users\\Maaike\\Desktop','D:\\Profiles\\koenradesma\\Desktop')\r\n\r\n# plot frequency profiles\r\nprofiles, mean_abs_error, SD, MIN, Q1, Q3, MAX = read_error_ouput(exceldir, workbookErrors)\r\n\r\nf1 = plt.figure(num=1, figsize=(7.6, 5))\r\nax1 = f1.add_subplot(111)\r\nax1.spines[\"top\"].set_visible(False) \r\nax1.spines[\"right\"].set_visible(False)\r\nax1.get_xaxis().tick_bottom() \r\nax1.get_yaxis().tick_left()\r\nax1.plot(profiles, mean_abs_error, linestyle='', marker='o', color='b') \r\nax1.errorbar(profiles, mean_abs_error, yerr = SD, fmt=None, color='b', capsize=8)\r\n# plt.xticks(range(len(mean_abs_error)), profiles, size = 'medium')\r\nax1.set_xlabel('heart rate (bpm)', fontsize=14)\r\nax1.set_ylabel('absolute error (mm)', fontsize=14)\r\nplt.xlim(45,105)\r\nplt.ylim(0,0.3)\r\n# save\r\nplt.savefig(os.path.join(dirsave, 'errorgraphfreq.pdf'), papertype='a0', dpi=300)\r\n\r\n\r\n# plot amplitude profiles\r\nprofiles, mean_abs_error, SD, MIN, Q1, Q3, MAX = read_error_ouput(exceldir, workbookErrors, colS=5, colE=12)\r\n\r\nf2 = plt.figure(num=3, figsize=(7.6, 5))\r\nax2 = f2.add_subplot(111)\r\nax2.spines[\"top\"].set_visible(False) \r\nax2.spines[\"right\"].set_visible(False)\r\nax2.get_xaxis().tick_bottom() \r\nax2.get_yaxis().tick_left()\r\nax2.plot(profiles[0], mean_abs_error[0], linestyle='', marker='o', color='k') \r\nax2.errorbar(profiles[0], mean_abs_error[0], yerr = SD[0], fmt=None, ecolor='k', capsize=8)\r\nax2.plot(profiles[1:-2], mean_abs_error[1:-2], linestyle='', marker='o', color='b') \r\nax2.errorbar(profiles[1:-2], mean_abs_error[1:-2], yerr = SD[1:-2], fmt=None, ecolor='b', capsize=8)\r\nax2.plot(profiles[-2:], mean_abs_error[-2:], linestyle='', marker='o', color='r')\r\nax2.errorbar(profiles[-2:], mean_abs_error[-2:], yerr = SD[-2:], fmt=None, ecolor='r', capsize=8) \r\n# ax2.plot(profiles, Q1, 'b.--')\r\n# ax2.plot(profiles, Q3, 'b.--')\r\n# 
plt.xticks(range(len(mean_abs_error)), profiles, size = 'medium')\r\nax2.set_xlabel('amplitude (mm)', fontsize=14)\r\nax2.set_ylabel('absolute error (mm)', fontsize=14)\r\nplt.xlim(0,1.45)\r\nplt.ylim(0,0.3)\r\n# save\r\nplt.savefig(os.path.join(dirsave, 'errorgraphampl.pdf'), papertype='a0', dpi=300)",
"\"\"\" Curvature calculation validation helix phantom\r\n\r\n2019, Maaike A. Koenrades\r\n\r\nA CT scanned helix-shaped phantom with a known theoretically calculated curvature was used:\r\nSchuurmann RCL, Kuster L, Slump CH, Vahl A, Van Den Heuvel DAF, Ouriel K, et al. Aortic curvature instead of angulation allows improved estimation of the true aorto-iliac trajectory. Eur J Vasc Endovasc Surg 2016;51(2):216–24. Doi: 10.1016/j.ejvs.2015.09.008.\r\n\r\n\"\"\"\r\nfrom stentseg.utils.datahandling import select_dir\r\nimport sys, os\r\nimport scipy.io\r\nfrom lspeas.utils.curvature import get_curvatures\r\nimport numpy as np\r\nimport visvis as vv\r\nfrom stentseg.utils.centerline import smooth_centerline\r\n\r\nfiledir = select_dir(r'C:\\Users\\Maaike\\SURFdrive\\UTdrive\\LSPEAS\\Analysis\\Ring motion\\curvature check helix fantoom', \r\n r'D:\\Profiles\\koenradesma\\SURFdrive\\UTdrive\\LSPEAS\\Analysis\\Ring motion\\curvature check helix fantoom')\r\n\r\nfilename = 'helix'\r\nmatdict = scipy.io.loadmat(os.path.join(filedir, filename+'.mat'))\r\nvar = matdict['HELIX']\r\n# show phantom\r\nvv.plot(var)\r\npp = np.asarray(var[99:1399]) # 100:1400 was used in Matlab implementation to exclude ends (Jaimy)\r\nvv.plot(pp, lc='r', lw=3)\r\n\r\n# smooth pp (as in implementation)\r\nsmooth_pp = smooth_centerline(pp, 15) # smooth the 'interpolated polygon' to helix shape\r\nvv.figure()\r\nvv.plot(smooth_pp, lc='r', lw=1)\r\n\r\n# calc curvature\r\ncv = get_curvatures(smooth_pp)\r\n# convert mm-1 to m-1\r\ncv *= 1000\r\n\r\n# skip set of start and end points where curvature cannot be calculated properly\r\n# print(cv)\r\n# print(cv[:30])\r\n# print(cv[-30:])\r\nn_to_skip = 10\r\ncv = cv[n_to_skip:-n_to_skip]\r\n\r\nmean_curvature = np.mean(cv)\r\nstd_curvature = np.std(cv)\r\nmin_curvature = np.min(cv)\r\nmax_curvature = np.max(cv)\r\n\r\nvv.figure()\r\nax = vv.gca()\r\nvv.plot(np.arange(len(cv)), cv)\r\ncurv_theory = 28.62 # m-1\r\nvv.plot(np.arange(len(cv)), np.ones_like(cv)*curv_theory, lc='r')\r\nax.SetLimits(rangeY=(0,100))\r\n\r\n# error based on theoretical value of curvature\r\nperc_mean_error = (mean_curvature-curv_theory)/curv_theory*100\r\nperc_std_error = std_curvature/curv_theory*100\r\n\r\nerrors = cv - curv_theory\r\nperc_errors = (cv - curv_theory) / curv_theory *100\r\n\r\nmean_error = np.mean(errors)\r\nmean_error_perc = np.mean(perc_errors)\r\nstd_error = np.std(errors)\r\nstd_error_perc = np.std(perc_errors)\r\n\r\nprint('Mean error = {} +/- {} m-1'.format(mean_error,std_error))\r\nprint('Mean percentage error = {} +/- {} %'.format(mean_error_perc,std_error_perc))\r\n\r\n# 1.7 +-/ 2.6 m-1\r\n# 6.0 +/- 9.0 %\r\n",
"\"\"\" Code for fitting circles, ellipses, planes, etc.\n\"\"\"\n\n\nimport numpy as np\nfrom numpy.linalg import eig, inv\n\nfrom stentseg.utils.new_pointset import PointSet\n\n\ndef fit_circle(pp, warnIfIllDefined=True):\n \"\"\" Fit a circle on the given 2D points\n \n Returns a tuple (x, y, r).\n \n In case the three points are on a line, the algorithm will fail, and\n return (0, 0, 0). A warning is printed, but this can be suppressed.\n \n The solution is a Least Squares fit. The method as describes in [1] is\n called Modified Least Squares (MLS) and poses a closed form solution\n which is very robust.\n\n [1]\n Dale Umbach and Kerry N. Jones\n 2000\n A Few Methods for Fitting Circles to Data\n IEEE Transactions on Instrumentation and Measurement\n \"\"\"\n \n # Check\n if pp.ndim != 2:\n raise ValueError('Circle fit needs an Nx2 array.')\n if pp.shape[1] != 2:\n raise ValueError('Circle fit needs 2D points.')\n if pp.shape[0] < 2:\n raise ValueError('Circle fit needs at least two points.')\n \n def cov(a, b):\n n = len(a)\n Ex = a.sum() / n\n Ey = b.sum() / n\n return ( (a-Ex)*(b-Ey) ).sum() / (n-1)\n \n # Get x and y elements\n X = pp[:,0]\n Y = pp[:,1]\n xoffset = X.mean()\n yoffset = Y.mean()\n X = X - xoffset\n Y = Y - yoffset\n \n # In the paper there is a factor n*(n-1) in all equations below. However,\n # this factor is removed by devision in the equations in the following cell\n A = cov(X,X)\n B = cov(X,Y)\n C = cov(Y,Y)\n D = 0.5 * ( cov(X,Y**2) + cov(X,X**2) )\n E = 0.5 * ( cov(Y,X**2) + cov(Y,Y**2) )\n \n # Calculate denumerator\n denum = A*C - B*B\n if denum==0:\n if warnIfIllDefined:\n print(\"Warning: can not fit a circle to the given points.\")\n return 0, 0, 0\n \n # Calculate point\n x = (D*C-B*E)/denum + xoffset\n y = (A*E-B*D)/denum + yoffset\n c = PointSet([x, y])\n \n # Calculate radius\n r = c.distance(pp).sum() / len(pp)\n \n # Done\n return x, y, r\n\n\ndef fit_ellipse(pp):\n \"\"\" Fit an ellipse to the given 2D points\n \n Returns a tuple (x, y, r1, r2, phi).\n \n Algorithm derived from:\n From http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html.\n Based on approach suggested by Fitzgibbon et al., Direct least squares \n fitting of ellipsees, 1996.\n \"\"\"\n # Check\n if pp.ndim != 2:\n raise ValueError('Ellipse fit needs an Nx2 array.')\n if pp.shape[1] != 2:\n raise ValueError('Ellipse fit needs 2D points.')\n if pp.shape[0] < 3:\n raise ValueError('Ellipse fit needs at least three points.')\n \n # Get x and y and subtract offset to avoid inaccuracied during\n # eigenvalue decomposition.\n x = pp[:,0]\n y = pp[:,1]\n xoffset = x.mean()\n yoffset = y.mean()\n x = x - xoffset\n y = y - yoffset\n \n # Do the math\n x = x[:,np.newaxis]\n y = y[:,np.newaxis]\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\n S = np.dot(D.T,D)\n C = np.zeros([6,6])\n C[0,2] = C[2,0] = 2; C[1,1] = -1\n E, V = eig(np.dot(inv(S), C))\n n = np.argmax(np.abs(E))\n a = V[:,n]\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\n \n # Calculate position\n num = b*b-a*c\n x0 = (c*d-b*f)/num + xoffset\n y0 = (a*f-b*d)/num + yoffset\n \n # Calculate radii\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n res1 = np.sqrt(up/down1)\n res2 = np.sqrt(up/down2)\n \n # Calculate direction vector\n phi = 0.5*np.arctan(2*b/(a-c))\n \n # Ensure that first radius is the largers\n if res1 < res2:\n res2, res1 = res1, res2\n phi += 0.5 * np.pi\n \n # Ensure 
that phi is between 0 and pi\n while phi < 0:\n phi += np.pi\n while phi > np.pi:\n phi -= np.pi\n \n return x0, y0, res1, res2, phi\n\n\ndef area(circle_or_ellipse):\n \"\"\" Calculate the area of the given circle or ellipse\n \"\"\"\n \n if len(circle_or_ellipse) == 3:\n r1 = r2 = circle_or_ellipse[2]\n elif len(circle_or_ellipse) == 5:\n r1, r2 = circle_or_ellipse[2], circle_or_ellipse[3]\n else:\n raise ValueError('Input of area() is not a circle nor an ellipse.')\n \n return np.pi * r1 * r2\n\n\ndef sample_circle(c, N=32):\n \"\"\" Sample points on a circle c\n \n Returns a 2D PointSet with N points\n \"\"\"\n \n assert len(c) == 3\n \n # Get x, y and radius\n x, y, r = c\n \n # Sample N points, but add one to close the loop\n a = np.linspace(0,2*np.pi, N+1)\n \n # Prepare array\n pp = np.empty((len(a), 2), dtype=np.float32)\n \n # Apply polar coordinates\n pp[:,0] = np.cos(a) * r + x\n pp[:,1] = np.sin(a) * r + y\n \n # Return as a pointset\n return PointSet(pp)\n\n\ndef sample_ellipse(e, N=32):\n \"\"\" Sample points on a ellipse e\n \n Returns a 2D PointSet with N+1 points\n \"\"\"\n \n assert len(e) == 5\n \n # Get x, y, radii and phi\n x, y, r1, r2, phi = e\n \n # Sample N points, but add one to close the loop\n a = np.linspace(0, 2*np.pi, N+1)\n \n # Prepare array\n pp = np.empty((len(a), 2), dtype=np.float32)\n \n # Apply polar coordinates\n pp[:,0] = x + r1 * np.cos(a) * np.cos(phi) - r2 * np.sin(a) * np.sin(phi)\n pp[:,1] = y + r1 * np.cos(a) * np.sin(phi) + r2 * np.sin(a) * np.cos(phi)\n \n # Return as a pointset\n return PointSet(pp)\n\n\ndef fit_plane(pp):\n \"\"\" Fit a plane through a set of 3D points\n \n Returns a tuple (a, b, c, d) which represents the plane mathematically\n as ``a*x + b*y + c*z = d``.\n \n This method uses singular value decomposition. It is the SVD method\n plublished here: http://stackoverflow.com/questions/15959411\n \"\"\"\n \n # Check\n if pp.ndim != 2:\n raise ValueError('Plane fit needs an Nx3 array.')\n if pp.shape[1] != 3:\n raise ValueError('Plane fit needs 3D points.')\n if pp.shape[0] < 3:\n raise ValueError('Plane fit needs at least three points.')\n \n rows, cols = pp.shape\n # Set up constraint equations of the form AB = 0,\n # where B is a column vector of the plane coefficients\n # in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.\n p = np.ones((rows, 1))\n AB = np.hstack([pp, p])\n [u, d, v] = np.linalg.svd(AB, 0)\n B = v[3, :] # Solution is last column of v.\n # Normalize\n nn = np.linalg.norm(B[0:3])\n B = B / nn\n # Make sure that the plane points up\n if B[3] > 0:\n B = [-x for x in B]\n # Return a b c d\n return B[0], B[1], B[2], B[3]\n\n\ndef project_to_plane(pp, plane):\n \"\"\" Project given 3D points to a plane to make them 2D\n \n Returns a 2D PointSet. We assume that the plane represents a grid\n that is aligned with the world grid, but rotated over the x and y\n axis.\n \"\"\"\n \n # Check\n if pp.ndim != 2:\n raise ValueError('project_to_plane needs an Nx3 array.')\n if pp.shape[1] != 3:\n raise ValueError('project_to_plane needs 3D points.')\n \n # Prepare\n a, b, c, d = plane\n norm = a**2 + b**2 + c**2\n common = (a*pp[:,0] + b*pp[:,1] + c*pp[:,2] + d) / norm\n \n # Calculate angles\n phix = np.arctan(a/c)\n phiy = np.arctan(b/c)\n \n # Project points to the plane. Points are still in world\n # coordinates, but are moved so that thet lie on the plane. 
The\n # movement is such that they are now on the closest point to the\n # plane.\n pp3 = pp.copy()\n pp3[:,0] = pp[:,0] - a * common\n pp3[:,1] = pp[:,1] - b * common\n pp3[:,2] = pp[:,2] - c * common\n \n # Rotate the points\n pp2 = PointSet(pp3[:,:2])\n pp2[:,0] = pp3[:,0] / np.cos(phix)\n pp2[:,1] = pp3[:,1] / np.cos(phiy)\n \n # Add some information so we can reconstruct the points\n pp2.plane = a, b, c, d\n \n return pp2\n\n\ndef signed_distance_to_plane(pp, plane):\n \"\"\" Find the signed distances of the given 3D points to the given plane.\n Note that the distances are signed, and can thus be negative.\n \"\"\"\n a, b, c, d = plane\n plane_norm = (a**2 + b**2 + c**2) ** 0.5\n return (a * pp[:, 0] + b * pp[:, 1] + c * pp[:, 2] + d) / plane_norm\n\n\ndef project_from_plane(pp, plane):\n \"\"\" Project 2D points on a plane to the original 3D coordinate frame\n \n Returns a 3D PointSet.\n \"\"\"\n \n # Check\n if pp.ndim != 2:\n raise ValueError('project_from_plane needs an Nx2 array.')\n if pp.shape[1] != 2:\n raise ValueError('project_from_plane needs 2D points.')\n \n # Prepare\n pp2 = pp\n a, b, c, d = plane\n phix = np.arctan(a/c)\n phiy = np.arctan(b/c)\n \n # Init 3D points\n pp3 = PointSet(np.zeros((pp2.shape[0], 3), 'float32'))\n \n # Rotate the points\n pp3[:,0] = pp2[:,0] * np.cos(phix)\n pp3[:,1] = pp2[:,1] * np.cos(phiy)\n \n # Find the z value for all points\n pp3[:,2] = -(pp3[:,0]*a + pp3[:,1]*b + d) / c\n \n return pp3\n\n\ndef convex_hull(points):\n \"\"\"Computes the convex hull of a set of 2D points\n \n Input: an iterable sequence of (x, y) pairs representing the points.\n Output: a list of vertices of the convex hull in counter-clockwise order,\n starting from the vertex with the lexicographically smallest coordinates.\n Implements Andrew's monotone chain algorithm. O(n log n) complexity.\n \n Each tuple in points may contain additional elements which happilly move\n along, but only the first 2 elements (x,y) are considered.\n \"\"\"\n \n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(points, key=lambda x:x[:2])\n \n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n \n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n \n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n \n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n \n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. 
\n return lower[:-1] + upper[:-1]\n\n\n\nif __name__ == '__main__':\n \n from stentseg.utils.new_pointset import PointSet\n \n # Create some data, 2D and 3D\n pp2 = PointSet(2)\n pp3 = PointSet(3)\n for r in np.linspace(0, 2*np.pi):\n x = np.sin(r) + 10\n y = np.cos(r) * 1.33 + 20\n z = 0.17*x + 0.79*y + 30\n pp2.append(x, y)\n pp3.append(x, y, z)\n # With noise\n pp2 += np.random.normal(0, 0.15, size=pp2.shape)\n pp3 += np.random.normal(0, 0.15, size=pp3.shape)\n \n # Fit 2D \n c2 = fit_circle(pp2)\n e2 = fit_ellipse(pp2)\n print('area circle 2D: % 1.2f' % area(c2))\n print('area ellipse 2D: % 1.2f' % area(e2))\n \n # Fit 3D. We first fit a plane, then project the points onto that\n # plane to make the points 2D, and then we fit the ellipse.\n # Further down, we sample the ellipse and project them to 3D again \n # to be able to visualize the result.\n plane = fit_plane(pp3)\n pp3_2 = project_to_plane(pp3, plane)\n c3 = fit_circle(pp3_2)\n e3 = fit_ellipse(pp3_2)\n print('area circle 3D: % 1.2f' % area(c3))\n print('area ellipse 3D: % 1.2f' % area(e3))\n \n # For visualization, calculate 4 points on rectangle that lies on the plane\n x1, x2 = pp3.min(0)[0]-0.3, pp3.max(0)[0]+0.3\n y1, y2 = pp3.min(0)[1]-0.3, pp3.max(0)[1]+0.3\n p1 = x1, y1, -(x1*plane[0] + y1*plane[1] + plane[3]) / plane[2]\n p2 = x2, y1, -(x2*plane[0] + y1*plane[1] + plane[3]) / plane[2]\n p3 = x2, y2, -(x2*plane[0] + y2*plane[1] + plane[3]) / plane[2]\n p4 = x1, y2, -(x1*plane[0] + y2*plane[1] + plane[3]) / plane[2]\n \n # Init visualization\n import visvis as vv\n fig = vv.clf()\n fig.position = 300, 300, 1000, 600\n \n # 2D vis\n a = vv.subplot(121)\n a.daspectAuto = False\n a.axis.showGrid = True\n vv.title('2D fitting')\n vv.xlabel('x'); vv.ylabel('y')\n # Plot\n vv.plot(pp2, ls='', ms='.', mc='k')\n# vv.plot(sample_circle(c2), lc='r', lw=2)\n vv.plot(sample_ellipse(e2), lc='b', lw=2)\n# vv.legend('2D points', 'Circle fit', 'Ellipse fit')\n vv.legend('2D points', 'Ellipse fit')\n \n # 3D vis\n a = vv.subplot(122)\n a.daspectAuto = False\n a.axis.showGrid = True\n vv.title('3D fitting')\n vv.xlabel('x'); vv.ylabel('y'); vv.zlabel('z')\n # Plot\n vv.plot(pp3, ls='', ms='.', mc='k')\n vv.plot(project_from_plane(pp3_2, plane), lc='r', ls='', ms='.', mc='r', mw=4)\n# vv.plot(project_from_plane(sample_circle(c3), plane), lc='r', lw=2)\n vv.plot(project_from_plane(sample_ellipse(e3), plane), lc='b', lw=2)\n vv.plot(np.array([p1, p2, p3, p4, p1]), lc='g', lw=2)\n# vv.legend('3D points', 'Projected points', 'Circle fit', 'Ellipse fit', 'Plane fit')\n vv.legend('3D points', 'Projected points', 'Ellipse fit', 'Plane fit')\n "
] |
[
[
"numpy.concatenate"
],
[
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.figure"
],
[
"numpy.ones_like",
"numpy.min",
"numpy.asarray",
"numpy.max",
"numpy.std",
"numpy.mean"
],
[
"numpy.dot",
"numpy.hstack",
"numpy.linalg.svd",
"numpy.ones_like",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"numpy.abs",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.random.normal",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ArmenFirman/Intelligent-Solar-Energy-Manager
|
[
"7a6a796b4e66442bd512eb7e1679c5ba29e145f1"
] |
[
"Main Code/WeatherData.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Amin Asbai\n\"\"\"\nimport json\nimport pandas as pd\nimport requests\n\n\ndef update_Weather_data(df):\n url='http://api.openweathermap.org/data/2.5/weather?q=Andratx&units=metric&appid=1e47e582bff799e3514239429b76f2aa'\n response = requests.get(url)\n climate_data=response.json()\n data=clean_data(climate_data)\n updated_dataframe=update_dataframe(df,data)\n return updated_dataframe\n\ndef clean_data(climate_data):\n main_data=climate_data[\"main\"]\n wind_data=climate_data[\"wind\"]\n data = {**main_data, **wind_data}\n data.pop(\"feels_like\", None)\n data.pop(\"temp_min\", None)\n data.pop(\"temp_max\", None)\n data[\"pressure\"]=100*data[\"pressure\"]\n data[\"irradiance\"]=None\n return data\n\ndef update_dataframe(df,dict_weather):\n df = df.iloc[1:]\n df = df.drop(columns=['Hour', 'Month'])\n aux_df=pd.DataFrame()\n for i in df.columns:\n aux_df.loc[0,i]=dict_weather[i]\n aux_df.insert(0, 'TimeStamp', pd.to_datetime('now').replace(second=0,microsecond=0))\n aux_df.set_index('TimeStamp', inplace=True)\n df=df.append(aux_df)\n df['Hour']=df.index.hour\n df['Month']=df.index.month\n return df"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
arunkumarchacko/ML_SageMaker_Studies
|
[
"59660b2bc9b163a62fa271ded3dc328700db7e67"
] |
[
"Project_Plagiarism_Detection/problem_unittests.py"
] |
[
"from unittest.mock import MagicMock, patch\nimport sklearn.naive_bayes\nimport numpy as np\nimport pandas as pd\nimport re\n\n# test csv file\nTEST_CSV = 'data/test_info.csv'\n\nclass AssertTest(object):\n '''Defines general test behavior.'''\n def __init__(self, params):\n self.assert_param_message = '\\n'.join([str(k) + ': ' + str(v) + '' for k, v in params.items()])\n \n def test(self, assert_condition, assert_message):\n assert assert_condition, assert_message + '\\n\\nUnit Test Function Parameters\\n' + self.assert_param_message\n\ndef _print_success_message():\n print('Tests Passed!')\n\n# test clean_dataframe\ndef test_numerical_df(numerical_dataframe):\n \n # test result\n transformed_df = numerical_dataframe(TEST_CSV)\n \n # Check type is a DataFrame\n assert isinstance(transformed_df, pd.DataFrame), 'Returned type is {}.'.format(type(transformed_df))\n \n # check columns\n column_names = list(transformed_df)\n assert 'File' in column_names, 'No File column, found.'\n assert 'Task' in column_names, 'No Task column, found.'\n assert 'Category' in column_names, 'No Category column, found.'\n assert 'Class' in column_names, 'No Class column, found.'\n \n # check conversion values\n assert transformed_df.loc[0, 'Category'] == 1, '`heavy` plagiarism mapping test, failed.'\n assert transformed_df.loc[2, 'Category'] == 0, '`non` plagiarism mapping test, failed.'\n assert transformed_df.loc[30, 'Category'] == 3, '`cut` plagiarism mapping test, failed.'\n assert transformed_df.loc[5, 'Category'] == 2, '`light` plagiarism mapping test, failed.'\n assert transformed_df.loc[37, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'\n assert transformed_df.loc[41, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'\n \n _print_success_message()\n\n\ndef test_containment(complete_df, containment_fn):\n \n # check basic format and value \n # for n = 1 and just the fifth file\n test_val = containment_fn(complete_df, 1, 'g0pA_taske.txt')\n \n assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))\n assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)\n \n # known vals for first few files\n filenames = ['g0pA_taska.txt', 'g0pA_taskb.txt', 'g0pA_taskc.txt', 'g0pA_taskd.txt']\n ngram_1 = [0.39814814814814814, 1.0, 0.86936936936936937, 0.5935828877005348]\n ngram_3 = [0.0093457943925233638, 0.96410256410256412, 0.61363636363636365, 0.15675675675675677]\n \n # results for comparison\n results_1gram = []\n results_3gram = []\n \n for i in range(4):\n val_1 = containment_fn(complete_df, 1, filenames[i])\n val_3 = containment_fn(complete_df, 3, filenames[i])\n results_1gram.append(val_1)\n results_3gram.append(val_3)\n \n print(results_1gram)\n print(ngram_1)\n # check correct results\n assert all(np.isclose(results_1gram, ngram_1, rtol=1e-04)), \\\n 'n=1 calculations are incorrect. 
Double check the intersection calculation.'\n # check correct results\n assert all(np.isclose(results_3gram, ngram_3, rtol=1e-04)), \\\n 'n=3 calculations are incorrect.'\n \n _print_success_message()\n \ndef test_lcs(df, lcs_word):\n \n test_index = 10 # file 10\n \n # get answer file text\n answer_text = df.loc[test_index, 'Text'] \n \n # get text for orig file\n # find the associated task type (one character, a-e)\n task = df.loc[test_index, 'Task']\n # we know that source texts have Class = -1\n orig_rows = df[(df['Class'] == -1)]\n orig_row = orig_rows[(orig_rows['Task'] == task)]\n source_text = orig_row['Text'].values[0]\n \n # calculate LCS\n test_val = lcs_word(answer_text, source_text)\n \n # check type\n assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))\n assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)\n \n # known vals for first few files\n lcs_vals = [0.1917808219178082, 0.8207547169811321, 0.8464912280701754, 0.3160621761658031, 0.24257425742574257]\n \n # results for comparison\n results = []\n \n for i in range(5):\n # get answer and source text\n answer_text = df.loc[i, 'Text'] \n task = df.loc[i, 'Task']\n # we know that source texts have Class = -1\n orig_rows = df[(df['Class'] == -1)]\n orig_row = orig_rows[(orig_rows['Task'] == task)]\n source_text = orig_row['Text'].values[0]\n print(answer_text)\n print(source_text)\n # calc lcs\n val = lcs_word(answer_text, source_text)\n results.append(val)\n \n #print(results)\n #print(lcs_vals)\n # check correct results\n assert all(np.isclose(results, lcs_vals, rtol=1e-05)), 'LCS calculations are incorrect.'\n \n _print_success_message()\n \ndef test_data_split(train_x, train_y, test_x, test_y):\n \n # check types\n assert isinstance(train_x, np.ndarray),\\\n 'train_x is not an array, instead got type: {}'.format(type(train_x))\n assert isinstance(train_y, np.ndarray),\\\n 'train_y is not an array, instead got type: {}'.format(type(train_y))\n assert isinstance(test_x, np.ndarray),\\\n 'test_x is not an array, instead got type: {}'.format(type(test_x))\n assert isinstance(test_y, np.ndarray),\\\n 'test_y is not an array, instead got type: {}'.format(type(test_y))\n \n # should hold all 95 submission files\n assert len(train_x) + len(test_x) == 95, \\\n 'Unexpected amount of train + test data. Expecting 95 answer text files, got ' +str(len(train_x) + len(test_x))\n assert len(test_x) > 1, \\\n 'Unexpected amount of test data. There should be multiple test files.'\n \n # check shape\n assert train_x.shape[1]==2, \\\n 'train_x should have as many columns as selected features, got: {}'.format(train_x.shape[1])\n assert len(train_y.shape)==1, \\\n 'train_y should be a 1D array, got shape: {}'.format(train_y.shape)\n \n _print_success_message()\n \n \n "
] |
[
[
"numpy.isclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SamvitJ/Deep-Feature-Flow
|
[
"56f982741aa4886878eca3d566419b353c62b698"
] |
[
"dff_deeplab/config/config.py"
] |
[
"# --------------------------------------------------------\n# Deep Feature Flow\n# Copyright (c) 2016 by Contributors\n# Copyright (c) 2017 Microsoft\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Modified by Xizhou Zhu, Yuwen Xiong, Bin Xiao\n# --------------------------------------------------------\n\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\n\nconfig.MXNET_VERSION = ''\nconfig.output_path = ''\nconfig.symbol = ''\nconfig.gpus = ''\nconfig.CLASS_AGNOSTIC = True\nconfig.SCALES = [(360, 600)] # first is scale (the shorter side); second is max size\n\n# default training\nconfig.default = edict()\nconfig.default.frequent = 20\nconfig.default.kvstore = 'device'\n\n# network related params\nconfig.network = edict()\nconfig.network.pretrained = ''\nconfig.network.pretrained_flow = ''\nconfig.network.pretrained_epoch = 0\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.IMAGE_STRIDE = 0\nconfig.network.FIXED_PARAMS = ['gamma', 'beta']\nconfig.network.DFF_FEAT_DIM = 2048\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = 'CityScape'\nconfig.dataset.image_set = 'leftImg8bit_train'\nconfig.dataset.test_image_set = 'leftImg8bit_val'\nconfig.dataset.root_path = '../data'\nconfig.dataset.dataset_path = '../data/cityscapes'\nconfig.dataset.NUM_CLASSES = 19\nconfig.dataset.annotation_prefix = 'gtFine'\n\n\nconfig.TRAIN = edict()\nconfig.TRAIN.lr = 0\nconfig.TRAIN.lr_step = ''\nconfig.TRAIN.lr_factor = 0.1\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.momentum = 0.9\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.model_prefix = ''\n\n# whether resume training\nconfig.TRAIN.RESUME = False\n# whether flip image\nconfig.TRAIN.FLIP = True\n# whether shuffle image\nconfig.TRAIN.SHUFFLE = True\n# whether use OHEM\nconfig.TRAIN.ENABLE_OHEM = False\n# size of images for each device, 2 for rcnn, 1 for rpn and e2e\nconfig.TRAIN.BATCH_IMAGES = 1\n# e2e changes behavior of anchor loader and metric\nconfig.TRAIN.END2END = False\n# group images with similar aspect ratio\nconfig.TRAIN.ASPECT_GROUPING = True\n\n# used for end2end training\n\n# DFF, trained image sampled from [min_offset, max_offset]\nconfig.TRAIN.MIN_OFFSET = -4\nconfig.TRAIN.MAX_OFFSET = 0\n\nconfig.TEST = edict()\n# size of images for each device\nconfig.TEST.BATCH_IMAGES = 1\n\n# DFF\nconfig.TEST.KEY_FRAME_INTERVAL = 5\n\nconfig.TEST.max_per_image = 300\n\n# Test Model Epoch\nconfig.TEST.test_epoch = 0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == 'TRAIN':\n if 'BBOX_WEIGHTS' in v:\n v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])\n elif k == 'network':\n if 'PIXEL_MEANS' in v:\n v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == 'SCALES':\n config[k][0] = (tuple(v))\n else:\n config[k] = v\n else:\n raise ValueError(\"key must exist in config.py\")\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thehomebrewnerd/featuretools
|
[
"5a7e09edf02b463ad903c6d8c40daa86f208c0c0",
"5a7e09edf02b463ad903c6d8c40daa86f208c0c0"
] |
[
"featuretools/tests/primitive_tests/test_groupby_transform_primitives.py",
"featuretools/tests/entityset_tests/test_last_time_index.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom ..testing_utils import make_ecommerce_entityset\n\nimport featuretools as ft\nfrom featuretools.computational_backends import PandasBackend\nfrom featuretools.primitives import (\n CumCount,\n CumMax,\n CumMean,\n CumMin,\n CumSum,\n Last,\n TransformPrimitive\n)\nfrom featuretools.variable_types import DatetimeTimeIndex, Numeric\n\n\[email protected]\ndef es():\n return make_ecommerce_entityset()\n\n\nclass TestCumCount:\n\n primitive = CumCount\n\n def test_order(self):\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 2], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([1, 2], [1, 2], [1], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_discrete(self):\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([1, 2], [1, 2], [1], [1])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(g.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumSum:\n\n primitive = CumSum\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 3], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 204], [102, 208], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMean:\n primitive = CumMean\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 1.5], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 102], [102, 104], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMax:\n\n primitive = CumMax\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 2], [2])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\"])\n answers = ([101, 103], [102, 106], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\nclass TestCumMin:\n\n primitive = CumMin\n\n def test_order(self):\n v = pd.Series([1, 2, 2])\n g = pd.Series([\"a\", \"b\", \"a\"])\n\n answers = ([1, 1], [2])\n\n function = 
self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n def test_regular(self):\n v = pd.Series([101, 102, 103, 104, 105, 106, 100])\n g = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\", \"b\", \"a\"])\n answers = ([101, 101, 100], [102, 102], [104], [105])\n\n function = self.primitive().get_function()\n for (_, group), answer in zip(v.groupby(g), answers):\n np.testing.assert_array_equal(function(group), answer)\n\n\ndef test_cum_sum(es):\n log_value_feat = es['log']['value']\n dfeat = ft.Feature(es['sessions']['device_type'], entity=es['log'])\n cum_sum = ft.Feature(log_value_feat, groupby=dfeat, primitive=CumSum)\n features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 15\n cum_sum_values = [0, 5, 15, 30, 50, 0, 1, 3, 6, 6, 50, 55, 55, 62, 76]\n for i, v in enumerate(cum_sum_values):\n assert v == cvalues[i]\n\n\ndef test_cum_min(es):\n log_value_feat = es['log']['value']\n cum_min = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMin)\n features = [cum_min]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_min.get_name()].values\n assert len(cvalues) == 15\n cum_min_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for i, v in enumerate(cum_min_values):\n assert v == cvalues[i]\n\n\ndef test_cum_max(es):\n log_value_feat = es['log']['value']\n cum_max = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMax)\n features = [cum_max]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_max.get_name()].values\n assert len(cvalues) == 15\n cum_max_values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]\n for i, v in enumerate(cum_max_values):\n assert v == cvalues[i]\n\n\ndef test_cum_sum_group_on_nan(es):\n log_value_feat = es['log']['value']\n es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +\n ['toothpaste'] * 3 + ['brown bag'] * 2 +\n ['shoes'] +\n [np.nan] * 4 +\n ['coke_zero'] * 2)\n es['log'].df['value'][16] = 10\n cum_sum = ft.Feature(log_value_feat, groupby=es['log']['product_id'], primitive=CumSum)\n features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(17))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 17\n cum_sum_values = [0, 5, 15,\n 15, 35,\n 0, 1, 3,\n 3, 3,\n 0,\n np.nan, np.nan, np.nan, np.nan, np.nan, 10]\n\n assert len(cvalues) == len(cum_sum_values)\n for i, v in enumerate(cum_sum_values):\n if np.isnan(v):\n assert (np.isnan(cvalues[i]))\n else:\n assert v == cvalues[i]\n\n\ndef test_cum_sum_numpy_group_on_nan(es):\n class CumSumNumpy(TransformPrimitive):\n \"\"\"Returns the cumulative sum after grouping\"\"\"\n\n name = \"cum_sum\"\n input_types = [Numeric]\n return_type = Numeric\n uses_full_entity = True\n\n def get_function(self):\n def cum_sum(values):\n return values.cumsum().values\n return cum_sum\n\n log_value_feat = es['log']['value']\n es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +\n ['toothpaste'] * 3 + ['brown bag'] * 2 +\n ['shoes'] +\n [np.nan] * 4 +\n ['coke_zero'] * 2)\n es['log'].df['value'][16] = 10\n cum_sum = ft.Feature(log_value_feat, groupby=es['log']['product_id'], primitive=CumSumNumpy)\n assert cum_sum.get_name() == \"CUM_SUM(value) by product_id\"\n 
features = [cum_sum]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(17))\n cvalues = df[cum_sum.get_name()].values\n assert len(cvalues) == 17\n cum_sum_values = [0, 5, 15,\n 15, 35,\n 0, 1, 3,\n 3, 3,\n 0,\n np.nan, np.nan, np.nan, np.nan, np.nan, 10]\n\n assert len(cvalues) == len(cum_sum_values)\n for i, v in enumerate(cum_sum_values):\n if np.isnan(v):\n assert (np.isnan(cvalues[i]))\n else:\n assert v == cvalues[i]\n\n\ndef test_cum_handles_uses_full_entity(es):\n def check(feature):\n pandas_backend = PandasBackend(es, [feature])\n df_1 = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2], time_last=None)\n df_2 = pandas_backend.calculate_all_features(instance_ids=[2, 4], time_last=None)\n\n # check that the value for instance id 2 matches\n assert (df_2.loc[2] == df_1.loc[2]).all()\n\n for primitive in [CumSum, CumMean, CumMax, CumMin]:\n check(ft.Feature(es['log']['value'], groupby=es['log']['session_id'], primitive=primitive))\n\n check(ft.Feature(es['log']['session_id'], groupby=es['log']['session_id'], primitive=CumCount))\n\n\ndef test_cum_mean(es):\n log_value_feat = es['log']['value']\n cum_mean = ft.Feature(log_value_feat, groupby=es['log']['session_id'], primitive=CumMean)\n features = [cum_mean]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n cvalues = df[cum_mean.get_name()].values\n assert len(cvalues) == 15\n cum_mean_values = [0, 2.5, 5, 7.5, 10, 0, .5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]\n for i, v in enumerate(cum_mean_values):\n assert v == cvalues[i]\n\n\ndef test_cum_count(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n features = [cum_count]\n df = ft.calculate_feature_matrix(entityset=es,\n features=features,\n instance_ids=range(15))\n cvalues = df[cum_count.get_name()].values\n assert len(cvalues) == 15\n cum_count_values = [1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 1, 2, 1, 2, 3]\n for i, v in enumerate(cum_count_values):\n assert v == cvalues[i]\n\n\ndef test_rename(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n copy_feat = cum_count.rename(\"rename_test\")\n assert cum_count.hash() != copy_feat.hash()\n assert cum_count.get_name() != copy_feat.get_name()\n assert all([x.generate_name() == y.generate_name() for x, y\n in zip(cum_count.base_features, copy_feat.base_features)])\n assert cum_count.entity == copy_feat.entity\n\n\ndef test_groupby_no_data(es):\n cum_count = ft.Feature(es['log']['session_id'],\n groupby=es['log']['session_id'],\n primitive=CumCount)\n last_feat = ft.Feature(cum_count, parent_entity=es['customers'], primitive=Last)\n df = ft.calculate_feature_matrix(entityset=es,\n features=[last_feat],\n cutoff_time=pd.Timestamp(\"2011-04-08\"))\n cvalues = df[last_feat.get_name()].values\n assert len(cvalues) == 3\n assert all([pd.isnull(value) for value in cvalues])\n\n\ndef test_groupby_uses_calc_time(es):\n def projected_amount_left(amount, timestamp, time=None):\n # cumulative sum of amout, with timedelta * constant subtracted\n delta = time - timestamp\n delta_seconds = delta / np.timedelta64(1, 's')\n return amount.cumsum() - (delta_seconds)\n\n class ProjectedAmountRemaining(TransformPrimitive):\n name = \"projected_amount_remaining\"\n uses_calc_time = True\n input_types = [Numeric, DatetimeTimeIndex]\n return_type = Numeric\n uses_full_entity = True\n\n def get_function(self):\n return projected_amount_left\n\n time_since_product 
= ft.Feature([es['log']['value'], es['log']['datetime']],\n groupby=es['log']['product_id'],\n primitive=ProjectedAmountRemaining)\n df = ft.calculate_feature_matrix(entityset=es,\n features=[time_since_product],\n cutoff_time=pd.Timestamp(\"2011-04-10 11:10:30\"))\n answers = [-88830, -88819, -88803, -88797, -88771, -88770, -88760, -88749,\n -88740, -88227, -1830, -1809, -1750, -1740, -1723, np.nan, np.nan]\n\n for x, y in zip(df[time_since_product.get_name()], answers):\n assert ((pd.isnull(x) and pd.isnull(y)) or x == y)\n",
"import copy\nfrom datetime import datetime\n\nimport pandas as pd\nimport pytest\n\nfrom ..testing_utils import make_ecommerce_entityset\n\nfrom featuretools import Relationship\n\n\[email protected]\ndef entityset():\n return make_ecommerce_entityset()\n\n\[email protected]\ndef values_es(entityset):\n new_es = copy.deepcopy(entityset)\n new_es.normalize_entity('log', 'values', 'value',\n make_time_index=True,\n new_entity_time_index=\"value_time\")\n return new_es\n\n\[email protected]\ndef true_values_lti():\n true_values_lti = pd.Series([datetime(2011, 4, 10, 10, 41, 0),\n datetime(2011, 4, 9, 10, 31, 9),\n datetime(2011, 4, 9, 10, 31, 18),\n datetime(2011, 4, 9, 10, 31, 27),\n datetime(2011, 4, 10, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 3),\n datetime(2011, 4, 9, 10, 30, 12),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 9, 10, 30, 18),\n datetime(2011, 4, 9, 10, 30, 24),\n datetime(2011, 4, 10, 11, 10, 3)])\n return true_values_lti\n\n\[email protected]\ndef true_sessions_lti():\n sessions_lti = pd.Series([datetime(2011, 4, 9, 10, 30, 24),\n datetime(2011, 4, 9, 10, 31, 27),\n datetime(2011, 4, 9, 10, 40, 0),\n datetime(2011, 4, 10, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 10, 11, 10, 3)])\n return sessions_lti\n\n\[email protected]\ndef wishlist_df():\n wishlist_df = pd.DataFrame({\n \"session_id\": [0, 1, 2, 2, 3, 4, 5],\n \"datetime\": [datetime(2011, 4, 9, 10, 30, 15),\n datetime(2011, 4, 9, 10, 31, 30),\n datetime(2011, 4, 9, 10, 30, 30),\n datetime(2011, 4, 9, 10, 35, 30),\n datetime(2011, 4, 10, 10, 41, 0),\n datetime(2011, 4, 10, 10, 39, 59),\n datetime(2011, 4, 10, 11, 10, 2)],\n \"product_id\": ['coke zero', 'taco clock', 'coke zero', 'car',\n 'toothpaste', 'brown bag', 'coke zero'],\n })\n return wishlist_df\n\n\[email protected]\ndef extra_session_df(entityset):\n row_values = {'customer_id': 2,\n 'device_name': 'PC',\n 'device_type': 0,\n 'id': 6}\n row = pd.DataFrame(row_values, index=pd.Index([6], name='id'))\n df = entityset['sessions'].df.append(row, sort=True).sort_index()\n return df\n\n\nclass TestLastTimeIndex(object):\n def test_leaf(self, entityset):\n entityset.add_last_time_indexes()\n log = entityset['log']\n assert len(log.last_time_index) == 17\n for v1, v2 in zip(log.last_time_index, log.df['datetime']):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_leaf_no_time_index(self, entityset):\n entityset.add_last_time_indexes()\n stores = entityset['stores']\n true_lti = pd.Series([None for x in range(6)], dtype='datetime64[ns]')\n assert len(true_lti) == len(stores.last_time_index)\n for v1, v2 in zip(stores.last_time_index, true_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent(self, values_es, true_values_lti):\n # test entity with time index and all instances in child entity\n values_es.add_last_time_indexes()\n values = values_es['values']\n assert len(values.last_time_index) == 11\n sorted_lti = values.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_values_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_some_missing(self, values_es, true_values_lti):\n # test entity with time index and not all instances have children\n values = values_es['values']\n\n # add extra value instance with no children\n row_values = {'value': 21.0,\n 'value_time': pd.Timestamp(\"2011-04-10 11:10:02\"),\n 'values_id': 11}\n # make sure index doesn't have same name as column to suppress pandas warning\n row = pd.DataFrame(row_values, 
index=pd.Index([11]))\n df = values.df.append(row, sort=True)\n df = df[['value', 'value_time']].sort_values(by='value')\n df.index.name = 'values_id'\n values.update_data(df)\n values_es.add_last_time_indexes()\n # lti value should default to instance's time index\n true_values_lti[10] = pd.Timestamp(\"2011-04-10 11:10:02\")\n true_values_lti[11] = pd.Timestamp(\"2011-04-10 11:10:03\")\n\n assert len(values.last_time_index) == 12\n sorted_lti = values.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_values_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_no_time_index(self, entityset, true_sessions_lti):\n # test entity without time index and all instances have children\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_parent_no_time_index_missing(self, entityset, extra_session_df,\n true_sessions_lti):\n # test entity without time index and not all instance have children\n sessions = entityset['sessions']\n\n # add session instance with no associated log instances\n sessions.update_data(extra_session_df)\n entityset.add_last_time_indexes()\n # since sessions has no time index, default value is NaT\n true_sessions_lti[6] = pd.NaT\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children(self, entityset, wishlist_df,\n true_sessions_lti):\n # test all instances in both children\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n # wishlist df has more recent events for two session ids\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_right_missing(self, entityset, wishlist_df,\n true_sessions_lti):\n # test all instances in left child\n sessions = entityset['sessions']\n\n # drop wishlist instance related to id 3 so it's only in log\n wishlist_df.drop(4, inplace=True)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # now only session id 1 has newer event in wishlist_log\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n\n assert len(sessions.last_time_index) == 6\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_left_missing(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # 
test all instances in right child\n sessions = entityset['sessions']\n\n # add row to sessions so not all session instances are in log\n sessions.update_data(extra_session_df)\n\n # add row to wishlist df so new session instance in in wishlist_log\n row_values = {'session_id': 6,\n 'datetime': pd.Timestamp(\"2011-04-11 11:11:11\"),\n 'product_id': 'toothpaste'}\n row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))\n df = wishlist_df.append(row)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # now wishlist_log has newer events for 3 session ids\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n true_sessions_lti[6] = pd.Timestamp(\"2011-04-11 11:11:11\")\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_all_combined(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # test some instances in right, some in left, all when combined\n sessions = entityset['sessions']\n\n # add row to sessions so not all session instances are in log\n sessions.update_data(extra_session_df)\n\n # add row to wishlist_log so extra session has child instance\n row_values = {'session_id': 6,\n 'datetime': pd.Timestamp(\"2011-04-11 11:11:11\"),\n 'product_id': 'toothpaste'}\n row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))\n df = wishlist_df.append(row)\n\n # drop instance 4 so wishlist_log does not have session id 3 instance\n df.drop(4, inplace=True)\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n\n # wishlist has newer events for 2 sessions\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[6] = pd.Timestamp(\"2011-04-11 11:11:11\")\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_multiple_children_both_missing(self, entityset, extra_session_df,\n wishlist_df, true_sessions_lti):\n # test all instances in neither child\n sessions = entityset['sessions']\n\n # add row to sessions to create session with no events\n sessions.update_data(extra_session_df)\n\n entityset.entity_from_dataframe(entity_id=\"wishlist_log\",\n dataframe=wishlist_df,\n index='id',\n make_index=True,\n time_index='datetime')\n relationship = Relationship(entityset['sessions']['id'],\n entityset['wishlist_log']['session_id'])\n entityset.add_relationship(relationship)\n entityset.add_last_time_indexes()\n sessions = entityset['sessions']\n\n # wishlist has 2 newer events and one is NaT\n true_sessions_lti[1] = pd.Timestamp(\"2011-4-9 10:31:30\")\n true_sessions_lti[3] = pd.Timestamp(\"2011-4-10 10:41:00\")\n true_sessions_lti[6] = pd.NaT\n\n assert len(sessions.last_time_index) == 7\n sorted_lti = 
sessions.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_sessions_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n\n def test_grandparent(self, entityset):\n # test sorting by time works correctly across several generations\n log = entityset[\"log\"]\n customers = entityset[\"customers\"]\n\n # For one user, change a log event to be newer than the user's normal\n # last time index. This event should be from a different session than\n # the current last time index.\n log.df['datetime'][5] = pd.Timestamp(\"2011-4-09 10:40:01\")\n log.df = (log.df.set_index('datetime', append=True)\n .sort_index(level=[1, 0], kind=\"mergesort\")\n .reset_index('datetime', drop=False))\n log.update_data(log.df)\n entityset.add_last_time_indexes()\n\n true_customers_lti = pd.Series([datetime(2011, 4, 9, 10, 40, 1),\n datetime(2011, 4, 10, 10, 41, 6),\n datetime(2011, 4, 10, 11, 10, 3)])\n\n assert len(customers.last_time_index) == 3\n sorted_lti = customers.last_time_index.sort_index()\n for v1, v2 in zip(sorted_lti, true_customers_lti):\n assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2\n"
] |
[
[
"pandas.Series",
"pandas.isnull",
"numpy.isnan",
"numpy.timedelta64",
"pandas.Timestamp"
],
[
"pandas.RangeIndex",
"pandas.Timestamp",
"pandas.Index",
"pandas.isnull"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ut-amrl/ContrastiveSceneContexts
|
[
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134",
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134",
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134",
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134",
"622b9cd32ea2dcf8307d25eb2e7ee1c09d220134"
] |
[
"downstream/insseg/datasets/scannet.py",
"downstream/votenet/lib/ddp_trainer.py",
"downstream/semseg/datasets/synthia.py",
"downstream/votenet/models/backbone/sparseconv/voxelizer.py",
"pretrain/scannet_pair/point_cloud_extractor.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nfrom scipy import spatial\n\nfrom datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type\nfrom lib.pc_utils import read_plyfile, save_point_cloud\nfrom lib.utils import read_txt, fast_hist, per_class_iu\nfrom lib.io3d import write_triangle_mesh, create_color_palette\n\nclass ScannetVoxelizationDataset(VoxelizationDataset):\n # added\n NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.\n NUM_IN_CHANNEL = 3\n CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',\n 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')\n VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)\n IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS))\n \n CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',\n 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']\n VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])\n IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE))\n\n\n # Voxelization arguments\n CLIP_BOUND = None\n TEST_CLIP_BOUND = None\n VOXEL_SIZE = 0.05\n\n # Augmentation arguments\n ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi,\n np.pi))\n TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))\n ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))\n\n ROTATION_AXIS = 'z'\n LOCFEAT_IDX = 2\n IS_FULL_POINTCLOUD_EVAL = True\n\n # If trainval.txt does not exist, copy train.txt and add contents from val.txt\n DATA_PATH_FILE = {\n DatasetPhase.Train: 'scannetv2_train.txt',\n DatasetPhase.Val: 'scannetv2_val.txt',\n DatasetPhase.TrainVal: 'scannetv2_trainval.txt',\n DatasetPhase.Test: 'scannetv2_test.txt',\n DatasetPhase.Debug: 'debug.txt'\n }\n\n def __init__(self,\n config,\n prevoxel_transform=None,\n input_transform=None,\n target_transform=None,\n augment_data=True,\n elastic_distortion=False,\n cache=False,\n phase=DatasetPhase.Train):\n if isinstance(phase, str):\n phase = str2datasetphase_type(phase)\n # Use cropped rooms for train/val\n data_root = config.data.scannet_path\n if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:\n self.CLIP_BOUND = self.TEST_CLIP_BOUND\n \n data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))\n if phase == DatasetPhase.Train and config.data.train_file:\n data_paths = read_txt(os.path.join(data_root, 'splits', config.data.train_file))\n \n # data efficiency by sampling points\n self.sampled_inds = {}\n if config.data.sampled_inds and phase == DatasetPhase.Train:\n self.sampled_inds = torch.load(config.data.sampled_inds)\n\n data_paths = [data_path + '.pth' for data_path in data_paths]\n logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))\n super().__init__(\n data_paths,\n data_root=data_root,\n prevoxel_transform=prevoxel_transform,\n input_transform=input_transform,\n target_transform=target_transform,\n 
ignore_label=config.data.ignore_label,\n return_transformation=config.data.return_transformation,\n augment_data=augment_data,\n elastic_distortion=elastic_distortion,\n config=config)\n\n def get_output_id(self, iteration):\n return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2])\n\n def _augment_locfeat(self, pointcloud):\n # Assuming that pointcloud is xyzrgb(...), append location feat.\n pointcloud = np.hstack(\n (pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1),\n pointcloud[:, 6:]))\n return pointcloud\n\n def load_data(self, index):\n filepath = self.data_root / self.data_paths[index]\n pointcloud = torch.load(filepath)\n coords = pointcloud[0].astype(np.float32)\n feats = pointcloud[1].astype(np.float32)\n labels = pointcloud[2].astype(np.int32)\n instances = pointcloud[3].astype(np.int32) \n if self.sampled_inds:\n scene_name = self.get_output_id(index)\n mask = np.ones_like(labels).astype(np.bool)\n sampled_inds = self.sampled_inds[scene_name]\n mask[sampled_inds] = False\n labels[mask] = 0\n instances[mask] = 0\n\n return coords, feats, labels, instances\n \n def get_original_pointcloud(self, coords, transformation, iteration):\n logging.info('===> Start testing on original pointcloud space.')\n data_path = self.data_paths[iteration]\n fullply_f = self.data_root / data_path\n query_xyz, _, query_label, _ = torch.load(fullply_f)\n\n coords = coords[:, 1:].numpy() + 0.5\n curr_transformation = transformation[0, :16].numpy().reshape(4, 4)\n coords = np.hstack((coords, np.ones((coords.shape[0], 1))))\n coords = (np.linalg.inv(curr_transformation) @ coords.T).T\n\n # Run test for each room.\n from pykeops.numpy import LazyTensor\n from pykeops.numpy.utils import IsGpuAvailable\n \n query_xyz = np.array(query_xyz)\n x_i = LazyTensor( query_xyz[:,None,:] ) # x_i.shape = (1e6, 1, 3)\n y_j = LazyTensor( coords[:,:3][None,:,:] ) # y_j.shape = ( 1, 2e6,3)\n D_ij = ((x_i - y_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances\n indKNN = D_ij.argKmin(1, dim=1) # Grid <-> Samples, (M**2, K) integer tensor\n inds = indKNN[:,0]\n return inds, query_xyz\n\n def save_prediction(self, coords, pred, transformation, iteration, save_dir):\n print('Running full pointcloud evaluation.')\n #if dataset.IGNORE_LABELS:\n # decode_label_map = {}\n # for k, v in dataset.label_map.items():\n # decode_label_map[v] = k\n # orig_pred = np.array([decode_label_map[x.item()] for x in orig_pred.cpu()], dtype=np.int)\n inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration)\n save = {'points': coords, 'mapping': inds_mapping, 'labels': pred}\n\n # Save prediciton in txt format for submission.\n room_id = self.get_output_id(iteration)\n torch.save(save, os.path.join(save_dir, room_id))\n #np.savetxt(f'{save_dir}/{room_id}.txt', ptc_pred, fmt='%i')\n\n def save_groundtruth(self, coords, gt, transformation, iteration, save_dir):\n save = {'points': coords, 'labels': gt}\n # Save prediciton in txt format for submission.\n room_id = self.get_output_id(iteration)\n torch.save(save, os.path.join(save_dir, room_id))\n\n\nclass ScannetVoxelization2cmDataset(ScannetVoxelizationDataset):\n VOXEL_SIZE = 0.02\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport torch\nimport os\nimport sys\nimport logging\nimport numpy as np\nimport importlib\nimport warnings\nimport argparse\n\nimport torch.optim as optim\nimport torch.nn as nn\nfrom datetime import datetime\nfrom models.loss_helper import get_loss as criterion\nfrom tensorboardX import SummaryWriter\nfrom torch.optim import lr_scheduler\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nfrom models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler\nfrom models.dump_helper import dump_results, dump_results_\nfrom models.ap_helper import APCalculator, parse_predictions, parse_groundtruths\n\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DataLoader\nfrom torch.serialization import default_restore_location\nfrom lib.distributed import multi_proc_run, is_master_proc, get_world_size\n\nclass DetectionTrainer():\n def __init__(self, config):\n self.is_master = is_master_proc(get_world_size()) if get_world_size() > 1 else True\n self.cur_device = torch.cuda.current_device()\n\n # load the configurations\n self.setup_logging()\n if os.path.exists('config.yaml'):\n logging.info('===> Loading exsiting config file')\n config = OmegaConf.load('config.yaml')\n logging.info('===> Loaded exsiting config file')\n logging.info('===> Configurations')\n logging.info(config.pretty())\n\n # Create Dataset and Dataloader\n if config.data.dataset == 'sunrgbd':\n from datasets.sunrgbd.sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, MAX_NUM_OBJ\n from datasets.sunrgbd.model_util_sunrgbd import SunrgbdDatasetConfig\n dataset_config = SunrgbdDatasetConfig()\n train_dataset = SunrgbdDetectionVotesDataset('train', \n num_points=config.data.num_points,\n augment=True,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n use_v1=(not config.data.use_sunrgbd_v2))\n test_dataset = SunrgbdDetectionVotesDataset(config.test.phase, \n num_points=config.data.num_points,\n augment=False,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n use_v1=(not config.data.use_sunrgbd_v2))\n elif config.data.dataset == 'scannet':\n from datasets.scannet.scannet_detection_dataset import ScannetDetectionDataset, MAX_NUM_OBJ\n from datasets.scannet.model_util_scannet import ScannetDatasetConfig\n dataset_config = ScannetDatasetConfig()\n train_dataset = ScannetDetectionDataset('train', \n num_points=config.data.num_points,\n augment=True,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height),\n by_scenes=config.data.by_scenes,\n by_points=config.data.by_points)\n\n test_dataset = ScannetDetectionDataset(config.test.phase, \n num_points=config.data.num_points,\n augment=False,\n use_color=config.data.use_color, \n use_height=(not config.data.no_height))\n else:\n logging.info('Unknown dataset %s. 
Exiting...'%(config.data.dataset))\n exit(-1)\n\n COLLATE_FN = None\n if config.data.voxelization:\n from models.backbone.sparseconv.voxelized_dataset import VoxelizationDataset, collate_fn\n train_dataset = VoxelizationDataset(train_dataset, config.data.voxel_size)\n test_dataset = VoxelizationDataset(test_dataset, config.data.voxel_size)\n COLLATE_FN = collate_fn\n logging.info('training: {}, testing: {}'.format(len(train_dataset), len(test_dataset)))\n\n self.sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if get_world_size() > 1 else None\n train_dataloader = DataLoader(\n train_dataset, \n batch_size=config.data.batch_size // config.misc.num_gpus,\n shuffle=(self.sampler is None),\n sampler=self.sampler,\n num_workers=config.data.num_workers, \n collate_fn=COLLATE_FN)\n\n test_dataloader = DataLoader(\n test_dataset, \n batch_size=1,\n shuffle=False, \n num_workers=1, \n collate_fn=COLLATE_FN)\n logging.info('train dataloader: {}, test dataloader: {}'.format(len(train_dataloader),len(test_dataloader)))\n\n # Init the model and optimzier\n MODEL = importlib.import_module('models.' + config.net.model) # import network module\n num_input_channel = int(config.data.use_color)*3 + int(not config.data.no_height)*1\n\n if config.net.model == 'boxnet':\n Detector = MODEL.BoxNet\n else:\n Detector = MODEL.VoteNet\n\n net = Detector(num_class=dataset_config.num_class,\n num_heading_bin=dataset_config.num_heading_bin,\n num_size_cluster=dataset_config.num_size_cluster,\n mean_size_arr=dataset_config.mean_size_arr,\n num_proposal=config.net.num_target,\n input_feature_dim=num_input_channel,\n vote_factor=config.net.vote_factor,\n sampling=config.net.cluster_sampling,\n backbone=config.net.backbone)\n\n if config.net.weights != '':\n #assert config.net.backbone == \"sparseconv\", \"only support sparseconv\"\n print('===> Loading weights: ' + config.net.weights)\n state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))\n model = net\n if config.net.is_train:\n model = net.backbone_net\n if config.net.backbone == \"sparseconv\":\n model = net.backbone_net.net\n \n matched_weights = DetectionTrainer.load_state_with_same_shape(model, state['state_dict'])\n model_dict = model.state_dict()\n model_dict.update(matched_weights)\n model.load_state_dict(model_dict)\n\n net.to(self.cur_device)\n if get_world_size() > 1:\n net = torch.nn.parallel.DistributedDataParallel(\n module=net, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False) \n\n # Load the Adam optimizer\n self.optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)\n # writer\n if self.is_master:\n self.writer = SummaryWriter(log_dir='tensorboard')\n self.config = config\n self.dataset_config = dataset_config\n self.net = net\n self.train_dataloader = train_dataloader\n self.test_dataloader = test_dataloader\n self.best_mAP = -1\n\n # Used for AP calculation\n self.CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,\n 'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,\n 'per_class_proposal': True, 'conf_thresh':0.05, 'dataset_config': dataset_config}\n\n # Used for AP calculation\n self.CONFIG_DICT_TEST = {'remove_empty_box': (not config.test.faster_eval), \n 'use_3d_nms': config.test.use_3d_nms, \n 'nms_iou': config.test.nms_iou,\n 'use_old_type_nms': config.test.use_old_type_nms, \n 'cls_nms': config.test.use_cls_nms, \n 'per_class_proposal': 
config.test.per_class_proposal,\n 'conf_thresh': config.test.conf_thresh,\n 'dataset_config': dataset_config}\n\n # Load checkpoint if there is any\n self.start_epoch = 0\n CHECKPOINT_PATH = os.path.join('checkpoint.tar')\n if os.path.isfile(CHECKPOINT_PATH):\n checkpoint = torch.load(CHECKPOINT_PATH)\n if get_world_size() > 1:\n _model = self.net.module\n else:\n _model = self.net\n _model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.start_epoch = checkpoint['epoch']\n self.best_mAP = checkpoint['best_mAP']\n logging.info(\"-> loaded checkpoint %s (epoch: %d)\"%(CHECKPOINT_PATH, self.start_epoch))\n\n # Decay Batchnorm momentum from 0.5 to 0.999\n # note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum\n BN_MOMENTUM_INIT = 0.5\n BN_MOMENTUM_MAX = 0.001\n BN_DECAY_STEP = config.optimizer.bn_decay_step\n BN_DECAY_RATE = config.optimizer.bn_decay_rate\n bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)\n self.bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=self.start_epoch-1)\n\n def setup_logging(self):\n ch = logging.StreamHandler(sys.stdout)\n logging.getLogger().setLevel(logging.WARN)\n if self.is_master:\n logging.getLogger().setLevel(logging.INFO)\n logging.basicConfig(\n format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S',\n handlers=[ch])\n\n @staticmethod\n def load_state_with_same_shape(model, weights):\n model_state = model.state_dict()\n if list(weights.keys())[0].startswith('module.'):\n print(\"Loading multigpu weights with module. prefix...\")\n weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}\n\n if list(weights.keys())[0].startswith('encoder.'):\n logging.info(\"Loading multigpu weights with encoder. 
prefix...\")\n weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}\n\n # print(weights.items())\n filtered_weights = {\n k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()\n }\n print(\"Loading weights:\" + ', '.join(filtered_weights.keys()))\n return filtered_weights\n \n @staticmethod\n def get_current_lr(epoch, config):\n lr = config.optimizer.learning_rate\n for i,lr_decay_epoch in enumerate(config.optimizer.lr_decay_steps):\n if epoch >= lr_decay_epoch:\n lr *= config.optimizer.lr_decay_rates[i]\n return lr\n\n @staticmethod\n def adjust_learning_rate(optimizer, epoch, config):\n lr = DetectionTrainer.get_current_lr(epoch, config)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n def train_one_epoch(self, epoch_cnt):\n stat_dict = {} # collect statistics\n DetectionTrainer.adjust_learning_rate(self.optimizer, epoch_cnt, self.config)\n self.bnm_scheduler.step() # decay BN momentum\n self.net.train() # set model to training mode\n for batch_idx, batch_data_label in enumerate(self.train_dataloader):\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n\n # Forward pass\n self.optimizer.zero_grad()\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n\n end_points = self.net(inputs)\n \n # Compute loss and gradients, update parameters.\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = criterion(end_points, self.dataset_config)\n loss.backward()\n self.optimizer.step()\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_interval = 10\n if ((batch_idx+1) % batch_interval == 0) and self.is_master:\n logging.info(' ---- batch: %03d ----' % (batch_idx+1))\n for key in stat_dict:\n self.writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval, \n (epoch_cnt*len(self.train_dataloader)+batch_idx)*self.config.data.batch_size)\n for key in sorted(stat_dict.keys()):\n logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))\n stat_dict[key] = 0\n\n def evaluate_one_epoch(self, epoch_cnt):\n np.random.seed(0)\n stat_dict = {} # collect statistics\n\n ap_calculator = APCalculator(ap_iou_thresh=self.config.test.ap_iou, class2type_map=self.dataset_config.class2type)\n self.net.eval() # set model to eval mode (for bn and dp)\n for batch_idx, batch_data_label in enumerate(self.test_dataloader):\n if batch_idx % 10 == 0:\n logging.info('Eval batch: %d'%(batch_idx))\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n \n # Forward pass\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n\n with torch.no_grad():\n end_points = self.net(inputs)\n\n # Compute loss\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = 
criterion(end_points, self.dataset_config)\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT) \n batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT) \n ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)\n\n # Dump evaluation results for visualization\n if self.config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0 and self.is_master:\n dump_results(end_points, 'results', self.dataset_config) \n\n # Log statistics\n logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))\n if self.is_master:\n for key in sorted(stat_dict.keys()):\n self.writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),\n (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)\n\n # Evaluate average precision\n metrics_dict = ap_calculator.compute_metrics()\n for key in metrics_dict:\n logging.info('eval %s: %f'%(key, metrics_dict[key]))\n if self.is_master:\n self.writer.add_scalar('validation/mAP{}'.format(self.config.test.ap_iou), metrics_dict['mAP'], (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)\n #mean_loss = stat_dict['loss']/float(batch_idx+1)\n\n return metrics_dict['mAP']\n\n def train(self):\n for epoch in range(self.start_epoch, self.config.optimizer.max_epoch):\n logging.info('**** EPOCH %03d ****' % (epoch))\n logging.info('Current learning rate: %f'%(DetectionTrainer.get_current_lr(epoch, self.config)))\n logging.info('Current BN decay momentum: %f'%(self.bnm_scheduler.lmbd(self.bnm_scheduler.last_epoch)))\n logging.info(str(datetime.now()))\n # Reset numpy seed.\n # REF: https://github.com/pytorch/pytorch/issues/5059\n np.random.seed()\n if get_world_size() > 1:\n self.sampler.set_epoch(epoch)\n self.train_one_epoch(epoch)\n\n if epoch % 5 == 4 and self.is_master: # Eval every 5 epochs\n best_mAP = self.evaluate_one_epoch(epoch)\n\n if best_mAP > self.best_mAP:\n self.best_mAP = best_mAP\n # Save checkpoint\n save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'best_mAP': self.best_mAP}\n\n if get_world_size() > 1:\n save_dict['state_dict'] = self.net.module.state_dict()\n else:\n save_dict['state_dict'] = self.net.state_dict()\n\n torch.save(save_dict, 'checkpoint.tar')\n OmegaConf.save(self.config, 'config.yaml')\n\n\n @staticmethod\n def write_to_benchmark(data, scene_name):\n from models.ap_helper import flip_axis_back_camera\n OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])\n os.makedirs('benchmark_output', exist_ok=True)\n bsize = len(scene_name)\n for bsize_ in range(bsize):\n write_list = []\n cur_data = data[bsize_]\n cur_name = scene_name[bsize_]\n for class_id, bbox, score in cur_data:\n bbox = flip_axis_back_camera(bbox)\n minx = np.min(bbox[:,0])\n miny = np.min(bbox[:,1])\n minz = np.min(bbox[:,2])\n maxx = np.max(bbox[:,0])\n maxy = np.max(bbox[:,1])\n maxz = np.max(bbox[:,2])\n write_list.append([minx, miny, minz, maxx,maxy, maxz, OBJ_CLASS_IDS[class_id], score])\n\n np.savetxt(os.path.join('benchmark_output', cur_name+'.txt'), np.array(write_list))\n\n\n def test(self):\n if self.config.test.use_cls_nms:\n assert(self.config.test.use_3d_nms)\n\n AP_IOU_THRESHOLDS = self.config.test.ap_iou_thresholds\n 
logging.info(str(datetime.now()))\n # Reset numpy seed.\n # REF: https://github.com/pytorch/pytorch/issues/5059\n np.random.seed(0)\n stat_dict = {}\n ap_calculator_list = [APCalculator(iou_thresh, self.dataset_config.class2type) for iou_thresh in AP_IOU_THRESHOLDS]\n self.net.eval() # set model to eval mode (for bn and dp)\n for batch_idx, batch_data_label in enumerate(self.test_dataloader):\n if batch_idx % 10 == 0:\n print('Eval batch: %d'%(batch_idx))\n for key in batch_data_label:\n if key == 'scan_name':\n continue\n batch_data_label[key] = batch_data_label[key].cuda()\n # Forward pass\n inputs = {'point_clouds': batch_data_label['point_clouds']}\n if 'voxel_coords' in batch_data_label:\n inputs.update({\n 'voxel_coords': batch_data_label['voxel_coords'],\n 'voxel_inds': batch_data_label['voxel_inds'],\n 'voxel_feats': batch_data_label['voxel_feats']})\n with torch.no_grad():\n end_points = self.net(inputs)\n\n # Compute loss\n for key in batch_data_label:\n assert(key not in end_points)\n end_points[key] = batch_data_label[key]\n loss, end_points = criterion(end_points, self.dataset_config)\n\n # Accumulate statistics and print out\n for key in end_points:\n if 'loss' in key or 'acc' in key or 'ratio' in key:\n if key not in stat_dict: stat_dict[key] = 0\n stat_dict[key] += end_points[key].item()\n\n batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT_TEST) \n batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT_TEST) \n for ap_calculator in ap_calculator_list:\n ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)\n\n # debug\n if self.config.test.write_to_benchmark:\n #from lib.utils.io3d import write_triangle_mesh\n #write_triangle_mesh(batch_data_label['point_clouds'][0].cpu().numpy(), None, None, batch_data_label['scan_name'][0]+'.ply')\n DetectionTrainer.write_to_benchmark(batch_pred_map_cls, batch_data_label['scan_name'])\n \n if self.config.test.save_vis:\n dump_results_(end_points, 'visualization', self.dataset_config)\n\n # Log statistics\n for key in sorted(stat_dict.keys()):\n logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))\n\n # Evaluate average precision\n if not self.config.test.write_to_benchmark:\n for i, ap_calculator in enumerate(ap_calculator_list):\n logging.info('-'*10 + 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]) + '-'*10)\n metrics_dict = ap_calculator.compute_metrics()\n for key in metrics_dict:\n logging.info('eval %s: %f'%(key, metrics_dict[key]))\n\n mean_loss = stat_dict['loss']/float(batch_idx+1)\n return mean_loss\n",
"import logging\nimport unittest\nimport imageio\nimport os\nimport os.path as osp\nimport pickle\nimport numpy as np\n\nfrom collections import defaultdict\nfrom plyfile import PlyData\n\nfrom lib.pc_utils import Camera, read_plyfile\nfrom lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \\\n str2datasetphase_type, DatasetPhase\nfrom lib.transforms import cfl_collate_fn_factory\nfrom lib.utils import read_txt, debug_on\n\n\nclass SynthiaDataset(DictDataset):\n NUM_LABELS = 16\n\n def __init__(self, data_path_file, input_transform=None, target_transform=None):\n with open(data_path_file, 'r') as f:\n data_paths = pickle.load(f)\n super(SynthiaDataset, self).__init__(data_paths, input_transform, target_transform)\n\n @staticmethod\n def load_extrinsics(extrinsics_file):\n \"\"\"Load the camera extrinsics from a .txt file.\n \"\"\"\n lines = read_txt(extrinsics_file)\n params = [float(x) for x in lines[0].split(' ')]\n extrinsics_matrix = np.asarray(params).reshape([4, 4])\n return extrinsics_matrix\n\n @staticmethod\n def load_intrinsics(intrinsics_file):\n \"\"\"Load the camera intrinsics from a intrinsics.txt file.\n\n intrinsics.txt: a text file containing 4 values that represent (in this order) {focal length,\n principal-point-x, principal-point-y, baseline (m) with the corresponding right\n camera}\n \"\"\"\n lines = read_txt(intrinsics_file)\n assert len(lines) == 7\n intrinsics = {\n 'focal_length': float(lines[0]),\n 'pp_x': float(lines[2]),\n 'pp_y': float(lines[4]),\n 'baseline': float(lines[6]),\n }\n return intrinsics\n\n @staticmethod\n def load_depth(depth_file):\n \"\"\"Read a single depth map (.png) file.\n\n 1280x760\n 760 rows, 1280 columns.\n Depth is encoded in any of the 3 channels in centimetres as an ushort.\n \"\"\"\n img = np.asarray(imageio.imread(depth_file, format='PNG-FI')) # uint16\n img = img.astype(np.int32) # Convert to int32 for torch compatibility\n return img\n\n @staticmethod\n def load_label(label_file):\n \"\"\"Load the ground truth semantic segmentation label.\n\n Annotations are given in two channels. The first channel contains the class of that pixel\n (see the table below). The second channel contains the unique ID of the instance for those\n objects that are dynamic (cars, pedestrians, etc.).\n\n Class R G B ID\n\n Void 0 0 0 0\n Sky 128 128 128 1\n Building 128 0 0 2\n Road 128 64 128 3\n Sidewalk 0 0 192 4\n Fence 64 64 128 5\n Vegetation 128 128 0 6\n Pole 192 192 128 7\n Car 64 0 128 8\n Traffic Sign 192 128 128 9\n Pedestrian 64 64 0 10\n Bicycle 0 128 192 11\n Lanemarking 0 172 0 12\n Reserved - - - 13\n Reserved - - - 14\n Traffic Light 0 128 128 15\n \"\"\"\n img = np.asarray(imageio.imread(label_file, format='PNG-FI')) # uint16\n img = img.astype(np.int32) # Convert to int32 for torch compatibility\n return img\n\n @staticmethod\n def load_rgb(rgb_file):\n \"\"\"Load RGB images. 1280x760 RGB images used for training.\n\n 760 rows, 1280 columns.\n \"\"\"\n img = np.array(imageio.imread(rgb_file)) # uint8\n return img\n\n\nclass SynthiaVoxelizationDataset(VoxelizationDataset):\n \"\"\"Load the ground truth semantic segmentation label.\n Annotations are given in two channels. The first channel contains the class of that pixel\n (see the table below). 
The second channel contains the unique ID of the instance for those\n objects that are dynamic (cars, pedestrians, etc.).\n Class R G B ID\n Void 0 0 0 0\n Sky 128 128 128 1\n Building 128 0 0 2\n Road 128 64 128 3\n Sidewalk 0 0 192 4\n Fence 64 64 128 5\n Vegetation 128 128 0 6\n Pole 192 192 128 7\n Car 64 0 128 8\n Traffic Sign 192 128 128 9\n Pedestrian 64 64 0 10\n Bicycle 0 128 192 11\n Lanemarking 0 172 0 12\n Reserved - - - 13\n Reserved - - - 14\n Traffic Light 0 128 128 15\n \"\"\"\n\n \n CLASS_LABELS = ('building', 'road', 'sidewalk', 'fence', 'vegetation', 'pole', 'car',\n 'sign', 'pedestrian', 'cyclist', 'lanemarking', 'traffic light')\n VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15)\n\n # Voxelization arguments\n CLIP_BOUND = ((-1800, 1800), (-1800, 1800), (-1800, 1800))\n TEST_CLIP_BOUND = ((-2500, 2500), (-2500, 2500), (-2500, 2500))\n VOXEL_SIZE = 15 # cm\n\n PREVOXELIZATION_VOXEL_SIZE = 7.5\n # Elastic distortion, (granularity, magitude) pairs\n # ELASTIC_DISTORT_PARAMS = ((80, 300),)\n\n # Augmentation arguments\n ROTATION_AUGMENTATION_BOUND = ((0, 0), (-np.pi, np.pi), (0, 0))\n TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.1, 0.1), (0, 0), (-0.1, 0.1))\n\n ROTATION_AXIS = 'y'\n LOCFEAT_IDX = 1\n NUM_LABELS = 16 # Automatically subtract ignore labels after processed\n IGNORE_LABELS = (0, 1, 13, 14) # void, sky, reserved, reserved\n\n # Split used in the Minkowski ConvNet, CVPR'19\n DATA_PATH_FILE = {\n DatasetPhase.Train: 'train_cvpr19.txt',\n DatasetPhase.Val: 'val_cvpr19.txt',\n DatasetPhase.Test: 'test_cvpr19.txt'\n }\n\n def __init__(self,\n config,\n prevoxel_transform=None,\n input_transform=None,\n target_transform=None,\n augment_data=True,\n elastic_distortion=False,\n cache=False,\n phase=DatasetPhase.Train):\n if isinstance(phase, str):\n phase = str2datasetphase_type(phase)\n if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:\n self.CLIP_BOUND = self.TEST_CLIP_BOUND\n data_root = config.data.synthia_path\n data_paths = read_txt(osp.join('/checkpoint/jihou/data/synthia4d/splits', self.DATA_PATH_FILE[phase]))\n if phase == DatasetPhase.Train:\n data_paths = data_paths[:int(len(data_paths)*config.data.data_ratio)]\n data_paths = [d.split()[0] for d in data_paths]\n logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))\n super().__init__(\n data_paths,\n data_root=data_root,\n input_transform=input_transform,\n target_transform=target_transform,\n ignore_label=config.data.ignore_label,\n return_transformation=config.data.return_transformation,\n augment_data=augment_data,\n elastic_distortion=elastic_distortion,\n config=config)\n\n def load_data(self, index):\n filepath = self.data_root / self.data_paths[index]\n plydata = PlyData.read(filepath)\n data = plydata.elements[0].data\n coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T\n feats = np.array([data['r'], data['g'], data['b']], dtype=np.float32).T\n labels = np.array(data['l'], dtype=np.int32)\n instances = np.zeros_like(labels)\n return coords, feats, labels, instances\n\n\nclass SynthiaCVPR15cmVoxelizationDataset(SynthiaVoxelizationDataset):\n pass\n\n\nclass SynthiaCVPR30cmVoxelizationDataset(SynthiaVoxelizationDataset):\n VOXEL_SIZE = 30\n\n\nclass SynthiaAllSequencesVoxelizationDataset(SynthiaVoxelizationDataset):\n DATA_PATH_FILE = {\n DatasetPhase.Train: 'train_raw.txt',\n DatasetPhase.Val: 'val_raw.txt',\n DatasetPhase.Test: 'test_raw.txt'\n }\n\n\nclass TestSynthia(unittest.TestCase):\n\n @debug_on()\n def 
test(self):\n from torch.utils.data import DataLoader\n from lib.utils import Timer\n from config import get_config\n config = get_config()\n\n dataset = SynthiaVoxelizationDataset(config)\n timer = Timer()\n\n data_loader = DataLoader(\n dataset=dataset,\n collate_fn=cfl_collate_fn_factory(limit_numpoints=False),\n num_workers=0,\n batch_size=4,\n shuffle=True)\n\n # Start from index 1\n # for i, batch in enumerate(data_loader, 1):\n iter = data_loader.__iter__()\n for i in range(100):\n timer.tic()\n batch = iter.next()\n print(batch, timer.toc())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import collections\n\nimport numpy as np\nimport MinkowskiEngine as ME\nfrom scipy.linalg import expm, norm\n\n\n# Rotation matrix along axis with angle theta\ndef M(axis, theta):\n return expm(np.cross(np.eye(3), axis / norm(axis) * theta))\n\n\nclass Voxelizer:\n\n def __init__(self,\n voxel_size=1,\n clip_bound=None,\n use_augmentation=False,\n scale_augmentation_bound=None,\n rotation_augmentation_bound=None,\n translation_augmentation_ratio_bound=None,\n ignore_label=255):\n \"\"\"\n Args:\n voxel_size: side length of a voxel\n clip_bound: boundary of the voxelizer. Points outside the bound will be deleted\n expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)).\n scale_augmentation_bound: None or (0.9, 1.1)\n rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis.\n Use random order of x, y, z to prevent bias.\n translation_augmentation_bound: ((-5, 5), (0, 0), (-10, 10))\n ignore_label: label assigned for ignore (not a training label).\n \"\"\"\n self.voxel_size = voxel_size\n self.clip_bound = clip_bound\n self.ignore_label = ignore_label\n\n # Augmentation\n self.use_augmentation = use_augmentation\n self.scale_augmentation_bound = scale_augmentation_bound\n self.rotation_augmentation_bound = rotation_augmentation_bound\n self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound\n\n def get_transformation_matrix(self):\n voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4)\n # Get clip boundary from config or pointcloud.\n # Get inner clip bound to crop from.\n\n # Transform pointcloud coordinate to voxel coordinate.\n # 1. Random rotation\n rot_mat = np.eye(3)\n if self.use_augmentation and self.rotation_augmentation_bound is not None:\n if isinstance(self.rotation_augmentation_bound, collections.Iterable):\n rot_mats = []\n for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound):\n theta = 0\n axis = np.zeros(3)\n axis[axis_ind] = 1\n if rot_bound is not None:\n theta = np.random.uniform(*rot_bound)\n rot_mats.append(M(axis, theta))\n # Use random order\n np.random.shuffle(rot_mats)\n rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]\n else:\n raise ValueError()\n rotation_matrix[:3, :3] = rot_mat\n # 2. 
Scale and translate to the voxel space.\n scale = 1 / self.voxel_size\n if self.use_augmentation and self.scale_augmentation_bound is not None:\n scale *= np.random.uniform(*self.scale_augmentation_bound)\n np.fill_diagonal(voxelization_matrix[:3, :3], scale)\n # Get final transformation matrix.\n return voxelization_matrix, rotation_matrix\n\n def clip(self, coords, center=None, trans_aug_ratio=None):\n bound_min = np.min(coords, 0).astype(float)\n bound_max = np.max(coords, 0).astype(float)\n bound_size = bound_max - bound_min\n if center is None:\n center = bound_min + bound_size * 0.5\n if trans_aug_ratio is not None:\n trans = np.multiply(trans_aug_ratio, bound_size)\n center += trans\n lim = self.clip_bound\n\n if isinstance(self.clip_bound, (int, float)):\n if bound_size.max() < self.clip_bound:\n return None\n else:\n clip_inds = ((coords[:, 0] >= (-lim + center[0])) &\n (coords[:, 0] < (lim + center[0])) &\n (coords[:, 1] >= (-lim + center[1])) &\n (coords[:, 1] < (lim + center[1])) &\n (coords[:, 2] >= (-lim + center[2])) &\n (coords[:, 2] < (lim + center[2])))\n return clip_inds\n\n # Clip points outside the limit\n clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) &\n (coords[:, 0] < (lim[0][1] + center[0])) &\n (coords[:, 1] >= (lim[1][0] + center[1])) &\n (coords[:, 1] < (lim[1][1] + center[1])) &\n (coords[:, 2] >= (lim[2][0] + center[2])) &\n (coords[:, 2] < (lim[2][1] + center[2])))\n return clip_inds\n\n def voxelize(self, coords, feats, labels, center=None):\n assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0]\n if self.clip_bound is not None:\n trans_aug_ratio = np.zeros(3)\n if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:\n for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):\n trans_aug_ratio[axis_ind] = np.random.uniform(\n *trans_ratio_bound)\n\n clip_inds = self.clip(coords, center, trans_aug_ratio)\n if clip_inds is not None:\n coords, feats = coords[clip_inds], feats[clip_inds]\n if labels is not None:\n labels = labels[clip_inds]\n\n # Get rotation and scale\n M_v, M_r = self.get_transformation_matrix()\n # Apply transformations\n rigid_transformation = M_v\n if self.use_augmentation:\n rigid_transformation = M_r @ rigid_transformation\n\n homo_coords = np.hstack(\n (coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))\n coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3])\n\n # Align all coordinates to the origin.\n min_coords = coords_aug.min(0)\n M_t = np.eye(4)\n M_t[:3, -1] = -min_coords\n rigid_transformation = M_t @ rigid_transformation\n coords_aug = np.floor(coords_aug - min_coords)\n\n # key = self.hash(coords_aug) # floor happens by astype(np.uint64)\n coords_aug, feats, labels = ME.utils.sparse_quantize(\n coords_aug, feats, labels=labels, ignore_label=self.ignore_label)\n\n return coords_aug, feats, labels, rigid_transformation.flatten()\n\n def voxelize_temporal(self,\n coords_t,\n feats_t,\n labels_t,\n centers=None,\n return_transformation=False):\n # Legacy code, remove\n if centers is None:\n centers = [\n None,\n ] * len(coords_t)\n coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], []\n\n # ######################### Data Augmentation #############################\n # Get rotation and scale\n M_v, M_r = self.get_transformation_matrix()\n # Apply transformations\n rigid_transformation = M_v\n if self.use_augmentation:\n rigid_transformation = M_r @ rigid_transformation\n # ######################### 
Voxelization #############################\n # Voxelize coords\n for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers):\n\n ###################################\n # Clip the data if bound exists\n if self.clip_bound is not None:\n trans_aug_ratio = np.zeros(3)\n if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:\n for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):\n trans_aug_ratio[axis_ind] = np.random.uniform(\n *trans_ratio_bound)\n\n clip_inds = self.clip(coords, center, trans_aug_ratio)\n if clip_inds is not None:\n coords, feats = coords[clip_inds], feats[clip_inds]\n if labels is not None:\n labels = labels[clip_inds]\n ###################################\n\n homo_coords = np.hstack(\n (coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))\n coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3]\n\n coords_aug, feats, labels = ME.utils.sparse_quantize(\n coords_aug, feats, labels=labels, ignore_label=self.ignore_label)\n\n coords_tc.append(coords_aug)\n feats_tc.append(feats)\n labels_tc.append(labels)\n transformation_tc.append(rigid_transformation.flatten())\n\n return_args = [coords_tc, feats_tc, labels_tc]\n if return_transformation:\n return_args.append(transformation_tc)\n\n return tuple(return_args)\n\n\ndef test():\n N = 16575\n coords = np.random.rand(N, 3) * 10\n feats = np.random.rand(N, 4)\n labels = np.floor(np.random.rand(N) * 3)\n coords[:3] = 0\n labels[:3] = 2\n voxelizer = Voxelizer()\n print(voxelizer.voxelize(coords, feats, labels))\n\n\nif __name__ == '__main__':\n test()\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport glob, os\nimport numpy as np\nimport cv2\nimport argparse\n\nfrom plyfile import PlyData, PlyElement\n\n# params\nparser = argparse.ArgumentParser()\n# data paths\nparser.add_argument('--input_path', required=True, help='path to sens file to read')\nparser.add_argument('--output_path', required=True, help='path to output folder')\nparser.add_argument('--save_npz', action='store_true')\nopt = parser.parse_args()\nprint(opt)\n\nif not os.path.exists(opt.output_path):\n os.mkdir(opt.output_path)\n\n# Load Depth Camera Intrinsic\ndepth_intrinsic = np.loadtxt(opt.input_path + '/intrinsic/intrinsic_depth.txt')\nprint('Depth intrinsic: ')\nprint(depth_intrinsic)\n\n# Compute Camrea Distance (just for demo, so you can choose the camera distance in frame sampling)\nposes = sorted(glob.glob(opt.input_path + '/pose/*.txt'), key=lambda a: int(os.path.basename(a).split('.')[0]))\ndepths = sorted(glob.glob(opt.input_path + '/depth/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))\ncolors = sorted(glob.glob(opt.input_path + '/color/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))\n\n# # Get Aligned Point Clouds.\nfor ind, (pose, depth, color) in enumerate(zip(poses, depths, colors)):\n name = os.path.basename(pose).split('.')[0]\n\n if os.path.exists(opt.output_path + '/{}.npz'.format(name)):\n continue\n\n try:\n print('='*50, ': {}'.format(pose))\n depth_img = cv2.imread(depth, -1) # read 16bit grayscale image\n mask = (depth_img != 0)\n color_image = cv2.imread(color)\n color_image = cv2.resize(color_image, (640, 480))\n color_image = np.reshape(color_image[mask], [-1,3])\n colors = np.zeros_like(color_image)\n colors[:,0] = color_image[:,2]\n colors[:,1] = color_image[:,1]\n colors[:,2] = color_image[:,0]\n\n pose = np.loadtxt(poses[ind])\n print('Camera pose: ')\n print(pose)\n \n depth_shift = 1000.0\n x,y = np.meshgrid(np.linspace(0,depth_img.shape[1]-1,depth_img.shape[1]), np.linspace(0,depth_img.shape[0]-1,depth_img.shape[0]))\n uv_depth = np.zeros((depth_img.shape[0], depth_img.shape[1], 3))\n uv_depth[:,:,0] = x\n uv_depth[:,:,1] = y\n uv_depth[:,:,2] = depth_img/depth_shift\n uv_depth = np.reshape(uv_depth, [-1,3])\n uv_depth = uv_depth[np.where(uv_depth[:,2]!=0),:].squeeze()\n \n intrinsic_inv = np.linalg.inv(depth_intrinsic)\n fx = depth_intrinsic[0,0]\n fy = depth_intrinsic[1,1]\n cx = depth_intrinsic[0,2]\n cy = depth_intrinsic[1,2]\n bx = depth_intrinsic[0,3]\n by = depth_intrinsic[1,3]\n point_list = []\n n = uv_depth.shape[0]\n points = np.ones((n,4))\n X = (uv_depth[:,0]-cx)*uv_depth[:,2]/fx + bx\n Y = (uv_depth[:,1]-cy)*uv_depth[:,2]/fy + by\n points[:,0] = X\n points[:,1] = Y\n points[:,2] = uv_depth[:,2]\n points_world = np.dot(points, np.transpose(pose))\n print(points_world.shape)\n\n pcd_save = np.zeros((points_world.shape[0], 7))\n pcd_save[:,:3] = points_world[:,:3]\n pcd_save[:,3:6] = colors\n\n print('Saving npz file...')\n np.savez(opt.output_path + '/{}.npz'.format(name), pcd=pcd_save)\n except:\n continue\n"
] |
[
[
"numpy.expand_dims",
"numpy.ones_like",
"torch.load",
"numpy.linalg.inv",
"numpy.ones",
"numpy.array"
],
[
"torch.utils.data.distributed.DistributedSampler",
"numpy.random.seed",
"torch.cuda.current_device",
"torch.load",
"numpy.min",
"torch.utils.data.DataLoader",
"numpy.max",
"torch.serialization.default_restore_location",
"torch.no_grad",
"numpy.array",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
],
[
"numpy.asarray",
"numpy.array",
"numpy.zeros_like"
],
[
"numpy.multiply",
"numpy.min",
"numpy.eye",
"numpy.random.shuffle",
"numpy.ones",
"numpy.max",
"numpy.random.rand",
"numpy.floor",
"numpy.fill_diagonal",
"scipy.linalg.norm",
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.linspace",
"numpy.reshape",
"numpy.linalg.inv",
"numpy.ones",
"numpy.zeros_like",
"numpy.transpose",
"numpy.zeros",
"numpy.where",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
blackredscarf/pytorch-SkipGram
|
[
"a9fa5a888a7b0c6170eb1fe146e59f54041b2613"
] |
[
"eval/ranking.py"
] |
[
"\"\"\"\nReference: https://github.com/mfaruqui/eval-word-vectors\n\"\"\"\n\nimport math\nimport numpy\nfrom operator import itemgetter\nfrom numpy.linalg import norm\n\nEPSILON = 1e-6\n\ndef euclidean(vec1, vec2):\n diff = vec1 - vec2\n return math.sqrt(diff.dot(diff))\n\ndef cosine_sim(vec1, vec2):\n vec1 += EPSILON * numpy.ones(len(vec1))\n vec2 += EPSILON * numpy.ones(len(vec1))\n return vec1.dot(vec2)/(norm(vec1)*norm(vec2))\n\ndef assign_ranks(item_dict):\n ranked_dict = {}\n sorted_list = [(key, val) for (key, val) in sorted(item_dict.items(),\n key=itemgetter(1),\n reverse=True)]\n for i, (key, val) in enumerate(sorted_list):\n same_val_indices = []\n for j, (key2, val2) in enumerate(sorted_list):\n if val2 == val:\n same_val_indices.append(j+1)\n if len(same_val_indices) == 1:\n ranked_dict[key] = i+1\n else:\n ranked_dict[key] = 1.*sum(same_val_indices)/len(same_val_indices)\n return ranked_dict\n\ndef correlation(dict1, dict2):\n avg1 = 1.*sum([val for key, val in dict1.iteritems()])/len(dict1)\n avg2 = 1.*sum([val for key, val in dict2.iteritems()])/len(dict2)\n numr, den1, den2 = (0., 0., 0.)\n for val1, val2 in zip(dict1.itervalues(), dict2.itervalues()):\n numr += (val1 - avg1) * (val2 - avg2)\n den1 += (val1 - avg1) ** 2\n den2 += (val2 - avg2) ** 2\n return numr / math.sqrt(den1 * den2)\n\ndef spearmans_rho(ranked_dict1, ranked_dict2):\n assert len(ranked_dict1) == len(ranked_dict2)\n if len(ranked_dict1) == 0 or len(ranked_dict2) == 0:\n return 0.\n x_avg = 1.*sum([val for val in ranked_dict1.values()])/len(ranked_dict1)\n y_avg = 1.*sum([val for val in ranked_dict2.values()])/len(ranked_dict2)\n num, d_x, d_y = (0., 0., 0.)\n for key in ranked_dict1.keys():\n xi = ranked_dict1[key]\n yi = ranked_dict2[key]\n num += (xi-x_avg)*(yi-y_avg)\n d_x += (xi-x_avg)**2\n d_y += (yi-y_avg)**2\n return num/(math.sqrt(d_x*d_y))\n"
] |
[
[
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
knorth55/chainer-light-head-rcnn
|
[
"4408311384d5abe550cd6ad004fa190aaced2c95",
"4408311384d5abe550cd6ad004fa190aaced2c95",
"4408311384d5abe550cd6ad004fa190aaced2c95"
] |
[
"tests/functions_tests/test_psroi_max_align_2d.py",
"tests/functions_tests/test_psroi_average_align_2d.py",
"light_head_rcnn/links/model/light_head_rcnn_resnet101.py"
] |
[
"import chainer\nfrom chainer.backends import cuda\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.testing import condition\nimport numpy as np\nimport unittest\n\nfrom light_head_rcnn import functions\n\n\nclass TestPSROIMaxPolling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 3\n self.group_size = 2\n self.out_c = 2\n self.n_channels = self.group_size * self.group_size * self.out_c\n self.x = np.arange(\n self.N * self.n_channels * 10 * 12,\n dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))\n np.random.shuffle(self.x)\n self.x = 2 * self.x / self.x.size - 1\n self.x = self.x.astype(np.float32)\n self.rois = np.array(\n [[0, 0, 7, 7],\n [1, 0, 5, 12],\n [0, 1, 10, 5],\n [3, 3, 4, 4]],\n dtype=np.float32\n )\n self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)\n self.n_roi = self.rois.shape[0]\n self.out_h, self.out_w = 4, 4\n self.spatial_scale = 1.0\n self.gy = np.random.uniform(\n -1, 1, (self.n_roi, self.out_c, self.out_h, self.out_w))\n self.gy = self.gy.astype(np.float32)\n self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}\n\n def check_forward(self, x_data, roi_data, roi_index_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_max_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n self.assertEqual(y.data.dtype, np.float32)\n y_data = cuda.to_cpu(y.data)\n self.assertEqual(\n (self.n_roi, self.out_c, self.out_h, self.out_w), y_data.shape)\n\n @condition.retry(3)\n def test_forward_cpu(self):\n self.check_forward(self.x, self.rois, self.roi_indices)\n\n @attr.gpu\n @condition.retry(3)\n def test_forward_gpu(self):\n self.check_forward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices))\n\n def check_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n gradient_check.check_backward(\n functions.PSROIMaxAlign2D(\n self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size),\n (x_data, roi_data, roi_index_data), y_grad_data,\n no_grads=[False, True, True], **self.check_backward_options)\n\n @condition.retry(3)\n def test_backward_cpu(self):\n self.check_backward(self.x, self.rois, self.roi_indices, self.gy)\n\n @attr.gpu\n @condition.retry(3)\n def test_backward_gpu(self):\n self.check_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n\n def apply_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_max_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n x.cleargrad()\n y.grad = y_grad_data\n y.backward()\n return x, y\n\n @attr.gpu\n @condition.retry(3)\n def test_consistency_with_gpu(self):\n x_cpu, y_cpu = self.apply_backward(\n self.x, self.rois, self.roi_indices, self.gy)\n x_gpu, y_gpu = self.apply_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n testing.assert_allclose(y_cpu.data, y_gpu.data)\n testing.assert_allclose(x_cpu.grad, x_gpu.grad)\n\n\ntesting.run_module(__name__, __file__)\n",
"import chainer\nfrom chainer.backends import cuda\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.testing import condition\nimport numpy as np\nimport unittest\n\nfrom light_head_rcnn import functions\n\n\nclass TestPSROIAverageAlign2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 3\n self.group_size = 2\n self.out_c = 2\n self.n_channels = self.group_size * self.group_size * self.out_c\n self.x = np.arange(\n self.N * self.n_channels * 10 * 12,\n dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))\n np.random.shuffle(self.x)\n self.x = 2 * self.x / self.x.size - 1\n self.x = self.x.astype(np.float32)\n self.rois = np.array(\n [[0, 0, 7, 7],\n [1, 0, 5, 12],\n [0, 1, 10, 5],\n [3, 3, 4, 4]],\n dtype=np.float32\n )\n self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)\n self.n_roi = self.rois.shape[0]\n self.out_h, self.out_w = 4, 4\n self.spatial_scale = 1.0\n self.gy = np.random.uniform(\n -1, 1, (self.n_roi, self.out_c, self.out_h, self.out_w))\n self.gy = self.gy.astype(np.float32)\n self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}\n\n def check_forward(self, x_data, roi_data, roi_index_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_average_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n self.assertEqual(y.data.dtype, np.float32)\n y_data = cuda.to_cpu(y.data)\n self.assertEqual(\n (self.n_roi, self.out_c, self.out_h, self.out_w), y_data.shape)\n\n @condition.retry(3)\n def test_forward_cpu(self):\n self.check_forward(self.x, self.rois, self.roi_indices)\n\n @attr.gpu\n @condition.retry(3)\n def test_forward_gpu(self):\n self.check_forward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices))\n\n def check_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n gradient_check.check_backward(\n functions.PSROIAverageAlign2D(\n self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size),\n (x_data, roi_data, roi_index_data), y_grad_data,\n no_grads=[False, True, True], **self.check_backward_options)\n\n @condition.retry(3)\n def test_backward_cpu(self):\n self.check_backward(self.x, self.rois, self.roi_indices, self.gy)\n\n @attr.gpu\n @condition.retry(3)\n def test_backward_gpu(self):\n self.check_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n\n def apply_backward(self, x_data, roi_data, roi_index_data, y_grad_data):\n x = chainer.Variable(x_data)\n rois = chainer.Variable(roi_data)\n roi_indices = chainer.Variable(roi_index_data)\n y = functions.psroi_average_align_2d(\n x, rois, roi_indices, self.out_c, self.out_h, self.out_w,\n self.spatial_scale, self.group_size)\n x.cleargrad()\n y.grad = y_grad_data\n y.backward()\n return x, y\n\n @attr.gpu\n @condition.retry(3)\n def test_consistency_with_gpu(self):\n x_cpu, y_cpu = self.apply_backward(\n self.x, self.rois, self.roi_indices, self.gy)\n x_gpu, y_gpu = self.apply_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.rois),\n cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))\n testing.assert_allclose(y_cpu.data, y_gpu.data)\n testing.assert_allclose(x_cpu.grad, x_gpu.grad)\n\n\ntesting.run_module(__name__, __file__)\n",
"from __future__ import division\n\nimport numpy as np\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainercv.links import Conv2DBNActiv\nfrom chainercv.links.model.resnet.resblock import ResBlock\nfrom chainercv.links import ResNet101\nfrom chainercv import utils\n\nfrom light_head_rcnn.functions import psroi_max_align_2d\nfrom light_head_rcnn.links.model.global_context_module \\\n import GlobalContextModule\nfrom light_head_rcnn.links.model.light_head_rcnn_base import LightHeadRCNN\nfrom light_head_rcnn.links.model.region_proposal_network \\\n import RegionProposalNetwork\n\n\nclass LightHeadRCNNResNet101(LightHeadRCNN):\n\n \"\"\"LightHead RCNN based on ResNet101.\n\n When you specify the path of a pre-trained chainer model serialized as\n a :obj:`.npz` file in the constructor, this chain model automatically\n initializes all the parameters with it.\n When a string in prespecified set is provided, a pretrained model is\n loaded from weights distributed on the Internet.\n The list of pretrained models supported are as follows:\n\n * :obj:`coco`: Loads weights trained with the trainval split of \\\n COCO Detection Dataset.\n * :obj:`imagenet`: Loads weights trained with ImageNet Classfication \\\n task for the feature extractor and the head modules. \\\n Weights that do not have a corresponding layer in ResNet101 \\\n will be randomly initialized.\n\n For descriptions on the interface of this model, please refer to\n :class:`~light_head_rcnn.links.model.light_head_rcnn_base.LightHeadRCNN`\n\n :class:`~light_head_rcnn.links.model.light_head_rcnn_base.LightHeadRCNN`\n supports finer control on random initializations of weights by arguments\n :obj:`vgg_initialW`, :obj:`rpn_initialW`, :obj:`loc_initialW` and\n :obj:`score_initialW`.\n It accepts a callable that takes an array and edits its values.\n If :obj:`None` is passed as an initializer, the default initializer is\n used.\n\n Args:\n n_fg_class (int): The number of classes excluding the background.\n pretrained_model (string): The destination of the pre-trained\n chainer model serialized as a :obj:`.npz` file.\n If this is one of the strings described\n above, it automatically loads weights stored under a directory\n :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,\n where :obj:`$CHAINER_DATASET_ROOT` is set as\n :obj:`$HOME/.chainer/dataset` unless you specify another value\n by modifying the environment variable.\n min_size (int): A preprocessing paramter for :meth:`prepare`.\n max_size (int): A preprocessing paramter for :meth:`prepare`.\n ratios (list of floats): This is ratios of width to height of\n the anchors.\n anchor_scales (list of numbers): This is areas of anchors.\n Those areas will be the product of the square of an element in\n :obj:`anchor_scales` and the original area of the reference\n window.\n vgg_initialW (callable): Initializer for the layers corresponding to\n the VGG-16 layers.\n rpn_initialW (callable): Initializer for Region Proposal Network\n layers.\n loc_initialW (callable): Initializer for the localization head.\n score_initialW (callable): Initializer for the score head.\n proposal_creator_params (dict): Key valued paramters for\n :class:`~chainercv.links.model.faster_rcnn.ProposalCreator`.\n\n \"\"\"\n\n _models = {\n 'coco_converted': {\n 'param': {'n_fg_class': 80},\n 'url': 'https://github.com/knorth55/'\n 'chainer-light-head-rcnn/releases/download/v0.0.0/'\n 'light_head_rcnn_resnet101_converted_2018_07_12.npz',\n 'cv2': True\n },\n 'coco': {\n 'param': 
{'n_fg_class': 80},\n 'url': 'https://github.com/knorth55/'\n 'chainer-light-head-rcnn/releases/download/v1.0.0/'\n 'light_head_rcnn_resnet101_trained_2018_07_26.npz',\n 'cv2': True\n },\n }\n feat_stride = 16\n proposal_creator_params = {\n 'nms_thresh': 0.7,\n 'n_train_pre_nms': 12000,\n 'n_train_post_nms': 2000,\n 'n_test_pre_nms': 6000,\n 'n_test_post_nms': 1000,\n 'force_cpu_nms': False,\n 'min_size': 0,\n }\n\n def __init__(\n self,\n n_fg_class=None,\n pretrained_model=None,\n min_size=800, max_size=1333, roi_size=7,\n ratios=[0.5, 1, 2], anchor_scales=[2, 4, 8, 16, 32],\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.1, 0.1, 0.2, 0.2),\n resnet_initialW=None, rpn_initialW=None,\n global_module_initialW=None,\n loc_initialW=None, score_initialW=None,\n proposal_creator_params=None,\n ):\n\n param, path = utils.prepare_pretrained_model(\n {'n_fg_class': n_fg_class}, pretrained_model, self._models)\n\n if resnet_initialW is None and pretrained_model:\n resnet_initialW = chainer.initializers.HeNormal()\n if rpn_initialW is None:\n rpn_initialW = chainer.initializers.Normal(0.01)\n if global_module_initialW is None:\n global_module_initialW = chainer.initializers.Normal(0.01)\n if loc_initialW is None:\n loc_initialW = chainer.initializers.Normal(0.001)\n if score_initialW is None:\n score_initialW = chainer.initializers.Normal(0.01)\n if proposal_creator_params is not None:\n self.proposal_creator_params = proposal_creator_params\n\n extractor = ResNet101Extractor(\n initialW=resnet_initialW)\n rpn = RegionProposalNetwork(\n 1024, 512,\n ratios=ratios,\n anchor_scales=anchor_scales,\n feat_stride=self.feat_stride,\n initialW=rpn_initialW,\n proposal_creator_params=self.proposal_creator_params,\n )\n\n head = LightHeadRCNNResNet101Head(\n param['n_fg_class'] + 1,\n roi_size=roi_size,\n spatial_scale=1. 
/ self.feat_stride,\n global_module_initialW=global_module_initialW,\n loc_initialW=loc_initialW,\n score_initialW=score_initialW\n )\n mean = np.array([122.7717, 115.9465, 102.9801],\n dtype=np.float32)[:, None, None]\n\n super(LightHeadRCNNResNet101, self).__init__(\n extractor, rpn, head, mean, min_size, max_size,\n loc_normalize_mean, loc_normalize_std)\n\n if path == 'imagenet':\n self._copy_imagenet_pretrained_resnet()\n elif path:\n chainer.serializers.load_npz(path, self)\n\n def _copy_imagenet_pretrained_resnet(self):\n def _copy_conv2dbn(src, dst):\n dst.conv.W.array = src.conv.W.array\n if src.conv.b is not None and dst.conv.b is not None:\n dst.conv.b.array = src.conv.b.array\n dst.bn.gamma.array = src.bn.gamma.array\n dst.bn.beta.array = src.bn.beta.array\n dst.bn.avg_var = src.bn.avg_var\n dst.bn.avg_mean = src.bn.avg_mean\n\n def _copy_bottleneck(src, dst):\n if hasattr(src, 'residual_conv'):\n _copy_conv2dbn(src.residual_conv, dst.residual_conv)\n _copy_conv2dbn(src.conv1, dst.conv1)\n _copy_conv2dbn(src.conv2, dst.conv2)\n _copy_conv2dbn(src.conv3, dst.conv3)\n\n def _copy_resblock(src, dst):\n for layer_name in src.layer_names:\n _copy_bottleneck(\n getattr(src, layer_name), getattr(dst, layer_name))\n\n pretrained_model = ResNet101(arch='he', pretrained_model='imagenet')\n _copy_conv2dbn(pretrained_model.conv1, self.extractor.conv1)\n _copy_resblock(pretrained_model.res2, self.extractor.res2)\n _copy_resblock(pretrained_model.res3, self.extractor.res3)\n _copy_resblock(pretrained_model.res4, self.extractor.res4)\n _copy_resblock(pretrained_model.res5, self.extractor.res5)\n\n\nclass ResNet101Extractor(chainer.Chain):\n\n \"\"\"ResNet101 Extractor for LightHeadRCNN ResNet101 implementation.\n\n This class is used as an extractor for LightHeadRCNNResNet101.\n This outputs feature maps.\n Dilated convolution is used in the C5 stage.\n\n Args:\n initialW: Initializer for ResNet101 extractor.\n \"\"\"\n\n def __init__(self, initialW=None):\n super(ResNet101Extractor, self).__init__()\n\n if initialW is None:\n initialW = chainer.initializers.HeNormal()\n kwargs = {\n 'initialW': initialW,\n 'bn_kwargs': {'eps': 1e-5},\n 'stride_first': True\n }\n\n with self.init_scope():\n # ResNet\n self.conv1 = Conv2DBNActiv(\n 3, 64, 7, 2, 3, nobias=True, initialW=initialW)\n self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)\n self.res2 = ResBlock(3, 64, 64, 256, 1, **kwargs)\n self.res3 = ResBlock(4, 256, 128, 512, 2, **kwargs)\n self.res4 = ResBlock(23, 512, 256, 1024, 2, **kwargs)\n self.res5 = ResBlock(3, 1024, 512, 2048, 1, 2, **kwargs)\n\n def __call__(self, x):\n \"\"\"Forward the chain.\n\n Args:\n x (~chainer.Variable): 4D image variable.\n\n \"\"\"\n\n with chainer.using_config('train', False):\n h = self.pool1(self.conv1(x))\n h = self.res2(h)\n h.unchain_backward()\n h = self.res3(h)\n res4 = self.res4(h)\n res5 = self.res5(res4)\n return res4, res5\n\n\nclass LightHeadRCNNResNet101Head(chainer.Chain):\n\n def __init__(\n self, n_class, roi_size, spatial_scale,\n global_module_initialW=None,\n loc_initialW=None, score_initialW=None\n ):\n\n super(LightHeadRCNNResNet101Head, self).__init__()\n self.n_class = n_class\n self.spatial_scale = spatial_scale\n self.roi_size = roi_size\n with self.init_scope():\n self.global_context_module = GlobalContextModule(\n 2048, 256, self.roi_size * self.roi_size * 10, 15,\n initialW=global_module_initialW)\n self.fc1 = L.Linear(\n self.roi_size * self.roi_size * 10, 2048,\n initialW=score_initialW)\n self.score = L.Linear(2048, 
n_class, initialW=score_initialW)\n self.cls_loc = L.Linear(2048, 4 * n_class, initialW=loc_initialW)\n\n def __call__(self, x, rois, roi_indices):\n # global context module\n h = self.global_context_module(x)\n # psroi max align\n pool = psroi_max_align_2d(\n h, rois, roi_indices,\n 10, self.roi_size, self.roi_size,\n self.spatial_scale, self.roi_size,\n sampling_ratio=2.)\n # fc\n fc1 = F.relu(self.fc1(pool))\n roi_cls_locs = self.cls_loc(fc1)\n roi_scores = self.score(fc1)\n return roi_cls_locs, roi_scores\n"
] |
[
[
"numpy.arange",
"numpy.random.uniform",
"numpy.array",
"numpy.random.shuffle"
],
[
"numpy.arange",
"numpy.random.uniform",
"numpy.array",
"numpy.random.shuffle"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dingdian110/AutoDC
|
[
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c",
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c",
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c",
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c",
"f5ccca6bea993bcff3e804fb859e8b25ae020b5c"
] |
[
"autodc/components/ensemble/unnamed_ensemble.py",
"autodc/components/models/regression/adaboost.py",
"autodc/components/feature_engineering/transformations/selector/variance_selector.py",
"autodc/components/transfer_learning/tlbo/priors/default_priors.py",
"autodc/components/metrics/metric.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport scipy.spatial\nfrom sklearn.metrics.scorer import _BaseScorer\nfrom autodc.components.utils.constants import CLS_TASKS\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import accuracy_score\n\n\ndef choose_base_models_regression(predictions, labels, num_model):\n base_mask = [0] * len(predictions)\n dif = predictions - labels\n dif[dif > 0] = 1\n dif[dif < 0] = -1\n '''Calculate the distance between each model'''\n dist = scipy.spatial.distance.cdist(dif, dif)\n total_dist = np.sum(dist, 1)\n '''Select the model which has large distance to other models'''\n selected_models = total_dist.argsort()[-num_model:]\n for model in selected_models:\n base_mask[model] = 1\n return base_mask\n\n\ndef choose_base_models_classification(predictions, num_model, interval=20):\n num_class = predictions.shape[2]\n num_total_models = predictions.shape[0]\n base_mask = [0] * len(predictions)\n bucket = np.arange(interval + 1) / interval\n bucket[-1] += 1e-8\n distribution = []\n for prediction in predictions:\n freq_array = []\n for i in range(num_class):\n class_i = prediction[:, i]\n group = pd.cut(class_i, bucket, right=False)\n counts = group.value_counts()\n freq = list(counts / counts.sum())\n freq_array += freq\n distribution.append(freq_array) # Shape: (num_total_models,20*num_class)\n distribution = np.array(distribution)\n\n # Apply the clustering algorithm\n model = AgglomerativeClustering(n_clusters=num_model, linkage=\"complete\")\n cluster = model.fit(distribution)\n \"\"\"\n Select models which are the most nearest to the clustering center\n selected_models = []\n \"\"\"\n for cluster_label in range(num_model):\n cluster_center = np.zeros(distribution.shape[1])\n count = 0\n \"\"\"\n Averaging the distribution which belong the same clustering class\n and then get the corresponding distribution center\n \"\"\"\n for i in range(num_total_models):\n if cluster.labels_[i] == cluster_label:\n count += 1\n cluster_center += distribution[i]\n cluster_center = cluster_center / count\n distances = np.sqrt(np.sum(np.asarray(cluster_center - distribution) ** 2, axis=1))\n selected_model = distances.argmin()\n base_mask[selected_model] = 1\n\n return base_mask\n\n\ndef calculate_weights(predictions, labels, base_mask):\n num_total_models = predictions.shape[0]\n num_samples = predictions.shape[1]\n weights = np.zeros((num_samples, num_total_models))\n for i in range(num_total_models):\n if base_mask[i] != 0:\n predicted_labels = np.argmax(predictions[i], 1)\n acc = accuracy_score(predicted_labels, labels)\n model_weight = 0.5 * np.log(acc / (1 - acc)) # a concrete value\n shannon_ent = -1.0 * np.sum(predictions[i] * np.log2(predictions[i]), 1) # shape: (1, num_samples)\n confidence = 1 / np.exp(shannon_ent)\n model_weight = model_weight * confidence # The weight of current model to all samples\n model_weight = model_weight.reshape(num_samples, 1)\n weights[:, i] = model_weight\n return weights\n\n\ndef calculate_weights_simple(predictions, labels, base_mask):\n num_total_models = predictions.shape[0]\n weights = [0] * num_total_models\n for i in range(num_total_models):\n if base_mask[i] != 0:\n predicted_labels = np.argmax(predictions[i], 1)\n acc = accuracy_score(predicted_labels, labels)\n model_weight = 0.5 * np.log(acc / (1 - acc)) # a concrete value\n weights[i] = model_weight\n return weights\n\n\nclass UnnamedEnsemble:\n def __init__(\n self,\n ensemble_size: int,\n task_type: int,\n metric: _BaseScorer,\n random_state: 
np.random.RandomState = None,\n ):\n self.ensemble_size = ensemble_size\n self.task_type = task_type\n self.metric = metric\n self.random_state = random_state\n self.base_model_mask = None\n self.weights_ = None\n\n def fit(self, predictions, labels):\n \"\"\"\n\n :param predictions: proba_predictions for cls. Shape: (num_models,num_samples,num_class) for cls\n :param labels: Shape: (num_samples,)\n :return: self\n \"\"\"\n if self.task_type in CLS_TASKS: # If classification\n self.base_model_mask = choose_base_models(predictions, labels, self.ensemble_size)\n self.weights_ = calculate_weights(predictions, labels, self.base_model_mask)\n else:\n pass\n return self\n\n def predict(self, predictions):\n predictions = np.asarray(predictions)\n\n # if predictions.shape[0] == len(self.weights_),\n # predictions include those of zero-weight models.\n if predictions.shape[0] == len(self.weights_):\n return np.average(predictions, axis=0, weights=self.weights_)\n\n # if prediction model.shape[0] == len(non_null_weights),\n # predictions do not include those of zero-weight models.\n elif predictions.shape[0] == np.count_nonzero(self.weights_):\n non_null_weights = [w for w in self.weights_ if w > 0]\n return np.average(predictions, axis=0, weights=non_null_weights)\n\n # If none of the above applies, then something must have gone wrong.\n else:\n raise ValueError(\"The dimensions of ensemble predictions\"\n \" and ensemble weights do not match!\")\n",
"import numpy as np\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter\n\nfrom autodc.components.models.base_model import BaseRegressionModel\nfrom autodc.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS\n\n\nclass AdaboostRegressor(BaseRegressionModel):\n\n def __init__(self, n_estimators, learning_rate, max_depth,\n random_state=None):\n self.n_estimators = n_estimators\n self.learning_rate = learning_rate\n self.random_state = random_state\n self.max_depth = max_depth\n self.estimator = None\n self.time_limit = None\n\n def fit(self, X, Y, sample_weight=None):\n from sklearn.ensemble import AdaBoostRegressor as ABR\n from sklearn.tree import DecisionTreeRegressor\n self.n_estimators = int(self.n_estimators)\n self.learning_rate = float(self.learning_rate)\n self.max_depth = int(self.max_depth)\n base_estimator = DecisionTreeRegressor(max_depth=self.max_depth)\n\n estimator = ABR(\n base_estimator=base_estimator,\n n_estimators=self.n_estimators,\n learning_rate=self.learning_rate,\n random_state=self.random_state\n )\n\n estimator.fit(X, Y, sample_weight=sample_weight)\n\n self.estimator = estimator\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'AB',\n 'name': 'AdaBoost Regression',\n 'handles_regression': True,\n 'handles_classification': False,\n 'handles_multiclass': False,\n 'handles_multilabel': False,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n\n n_estimators = UniformIntegerHyperparameter(\n name=\"n_estimators\", lower=50, upper=500, default_value=50, log=False)\n learning_rate = UniformFloatHyperparameter(\n name=\"learning_rate\", lower=0.01, upper=2, default_value=0.1, log=True)\n max_depth = UniformIntegerHyperparameter(\n name=\"max_depth\", lower=1, upper=10, default_value=1, log=False)\n\n cs.add_hyperparameters([n_estimators, learning_rate, max_depth])\n return cs\n elif optimizer == 'tpe':\n from hyperopt import hp\n space = {'n_estimators': hp.randint('ab_n_estimators', 451) + 50,\n 'learning_rate': hp.loguniform('ab_learning_rate', np.log(0.01), np.log(2)),\n 'max_depth': hp.randint('ab_max_depth', 10) + 1}\n\n init_trial = {'n_estimators': 50, 'learning_rate': 0.1, 'algorithm': \"SAMME.R\", 'max_depth': 1}\n return space\n",
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom autodc.components.feature_engineering.transformations.base_transformer import *\n\n\nclass VarianceSelector(Transformer):\n def __init__(self, threshold=1e-7):\n super().__init__(\"variance_selector\", 9)\n self.input_type = [NUMERICAL, DISCRETE, CATEGORICAL]\n self.compound_mode = 'only_new'\n self.threshold = threshold\n\n def operate(self, input_datanode, target_fields=None):\n from sklearn.feature_selection import VarianceThreshold\n\n feature_types = input_datanode.feature_types\n X, y = input_datanode.data\n if target_fields is None:\n target_fields = collect_fields(feature_types, self.input_type)\n X_new = X.copy()\n else:\n X_new = X[:, target_fields]\n\n n_fields = len(feature_types)\n irrevalent_fields = list(range(n_fields))\n for field_id in target_fields:\n irrevalent_fields.remove(field_id)\n\n is_selected = [True] * len(target_fields)\n if self.model is None:\n self.model = VarianceThreshold(threshold=self.threshold)\n self.model.fit(X_new)\n\n for idx, var in enumerate(self.model.variances_):\n is_selected[idx] = True if var > self.threshold else False\n\n irrevalent_types = [feature_types[idx] for idx in irrevalent_fields]\n selected_types = [feature_types[idx] for idx in target_fields if is_selected[idx]]\n selected_types.extend(irrevalent_types)\n\n _X = self.model.transform(X_new)\n\n if len(irrevalent_fields) > 0:\n new_X = np.hstack((_X, X[:, irrevalent_fields]))\n if input_datanode.feature_names is not None:\n feature_names = np.hstack(([input_datanode.feature_names[idx] for idx in irrevalent_fields],\n [input_datanode.feature_names[idx] for idx in self.model.get_support(True)]))\n else:\n feature_names = None\n else:\n new_X = _X\n if input_datanode.feature_names is not None:\n feature_names = [input_datanode.feature_names[idx] for idx in self.model.get_support(True)]\n else:\n feature_names = None\n new_feature_types = selected_types\n output_datanode = DataNode((new_X, y), new_feature_types, input_datanode.task_type, feature_names=feature_names)\n output_datanode.trans_hist = input_datanode.trans_hist.copy()\n output_datanode.trans_hist.append(self.type)\n output_datanode.enable_balance = input_datanode.enable_balance\n output_datanode.data_balance = input_datanode.data_balance\n self.target_fields = target_fields.copy()\n\n return output_datanode\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n return cs\n elif optimizer == 'tpe':\n from hyperopt import hp\n space = {}\n return space\n",
"import numpy as np\n\nfrom .base_prior import BasePrior, TophatPrior, \\\n LognormalPrior, HorseshoePrior\n\n\nclass DefaultPrior(BasePrior):\n\n def __init__(self, n_dims, rng=None):\n if rng is None:\n self.rng = np.random.RandomState(np.random.randint(0, 10000))\n else:\n self.rng = rng\n\n # The number of hyperparameters\n self.n_dims = n_dims\n\n # Prior for the Matern52 lengthscales\n self.tophat = TophatPrior(-10, 2, rng=self.rng)\n\n # Prior for the covariance amplitude\n self.ln_prior = LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng)\n\n # Prior for the noise\n self.horseshoe = HorseshoePrior(scale=0.1, rng=self.rng)\n\n def lnprob(self, theta):\n lp = 0\n # Covariance amplitude\n lp += self.ln_prior.lnprob(theta[0])\n # Lengthscales\n lp += self.tophat.lnprob(theta[1:-1])\n # Noise\n lp += self.horseshoe.lnprob(theta[-1])\n\n return lp\n\n def sample_from_prior(self, n_samples):\n p0 = np.zeros([n_samples, self.n_dims])\n # Covariance amplitude\n p0[:, 0] = self.ln_prior.sample_from_prior(n_samples)[:, 0]\n # Lengthscales\n ls_sample = np.array([self.tophat.sample_from_prior(n_samples)[:, 0]\n for _ in range(1, (self.n_dims - 1))]).T\n p0[:, 1:(self.n_dims - 1)] = ls_sample\n # Noise\n p0[:, -1] = self.horseshoe.sample_from_prior(n_samples)[:, 0]\n return p0\n\n def gradient(self, theta):\n # TODO: Implement real gradient here\n return np.zeros([theta.shape[0]])\n",
"from sklearn.metrics.scorer import make_scorer, _BaseScorer\nfrom functools import partial\n\n\ndef get_metric(metric):\n # Metrics for classification\n if metric in [\"accuracy\", \"acc\"]:\n from sklearn.metrics import accuracy_score\n return make_scorer(accuracy_score)\n elif metric in [\"balanced_accuracy\", \"bal_acc\"]:\n from sklearn.metrics.scorer import balanced_accuracy_scorer\n return balanced_accuracy_scorer\n elif metric == 'f1':\n from sklearn.metrics import f1_score\n return make_scorer(partial(f1_score, average='macro'))\n elif metric == 'precision':\n from sklearn.metrics import precision_score\n return make_scorer(precision_score)\n elif metric == 'recall':\n from sklearn.metrics import recall_score\n return make_scorer(recall_score)\n elif metric == \"auc\":\n from sklearn.metrics import roc_auc_score\n return make_scorer(roc_auc_score, needs_threshold=True)\n elif metric in ['log_loss', 'cross_entropy']:\n from sklearn.metrics import log_loss\n return make_scorer(log_loss, greater_is_better=False, needs_proba=True)\n\n # Metrics for regression\n elif metric in [\"mean_squared_error\", \"mse\"]:\n from sklearn.metrics import mean_squared_error\n return make_scorer(mean_squared_error, greater_is_better=False)\n elif metric == \"rmse\":\n from .rgs_metrics import rmse\n return make_scorer(rmse, greater_is_better=False)\n elif metric in ['mean_squared_log_error', \"msle\"]:\n from sklearn.metrics import mean_squared_log_error\n return make_scorer(mean_squared_log_error, greater_is_better=False)\n elif metric == \"evs\":\n from sklearn.metrics import explained_variance_score\n return make_scorer(explained_variance_score)\n elif metric == \"r2\":\n from sklearn.metrics import r2_score\n return make_scorer(r2_score)\n elif metric == \"max_error\":\n from sklearn.metrics import max_error\n return make_scorer(max_error, greater_is_better=False)\n elif metric in [\"mean_absolute_error\", \"mae\"]:\n from sklearn.metrics import mean_absolute_error\n return make_scorer(mean_absolute_error, greater_is_better=False)\n elif metric == \"median_absolute_error\":\n from sklearn.metrics import median_absolute_error\n return make_scorer(median_absolute_error, greater_is_better=False)\n elif isinstance(metric, _BaseScorer):\n return metric\n elif callable(metric):\n import warnings\n warnings.warn(\"metric receives a callable and we consider to maximize it!\")\n return make_scorer(metric)\n else:\n raise ValueError(\"Given\", str(metric), \". Expect a str or a sklearn.Scorer or a callable\")\n"
] |
[
[
"numpy.log",
"numpy.log2",
"numpy.asarray",
"numpy.arange",
"numpy.argmax",
"pandas.cut",
"numpy.count_nonzero",
"numpy.average",
"numpy.exp",
"sklearn.cluster.AgglomerativeClustering",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"sklearn.metrics.accuracy_score"
],
[
"numpy.log",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.ensemble.AdaBoostRegressor"
],
[
"sklearn.feature_selection.VarianceThreshold"
],
[
"numpy.zeros",
"numpy.random.randint"
],
[
"sklearn.metrics.scorer.make_scorer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |