Dataset schema: repo_name (string, lengths 6-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list).
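A minimal sketch of how rows with this schema could be read, assuming the records are stored one JSON object per line in a file named rows.jsonl; the file name, storage format, and field-access pattern below are illustrative assumptions, not taken from the dump itself:

    import json

    # Assumption: each dataset row is one JSON object per line in "rows.jsonl".
    with open("rows.jsonl") as fh:
        for line in fh:
            row = json.loads(line)
            repo = row["repo_name"]          # string, length 6-130
            commits = row["hexsha"]          # sequence of commit hashes
            versions = row["possible_versions"]  # list of per-library version candidates
            # file_path, code, and apis appear to be parallel sequences per row.
            for path, source, api_list in zip(row["file_path"], row["code"], row["apis"]):
                print(repo, path, len(source.splitlines()), api_list, versions)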
chaosWsF/Python-Practice
[ "ff617675b6bcd125933024bb4c246b63a272314d" ]
[ "leetcode/0447_number_of_boomerangs.py" ]
[ "\"\"\"\nGiven n points in the plane that are all pairwise distinct, a \"boomerang\" is a tuple of points (i, j, k) \nsuch that the distance between i and j equals the distance between i and k (the order of the tuple matters).\n\nFind the number of boomerangs. You may assume that n will be at most 500 and coordinates of points are all \nin the range [-10000, 10000] (inclusive).\n\nExample:\n Input: [[0,0],[1,0],[2,0]]\n Output: 2\n Explanation: The two boomerangs are [[1,0],[0,0],[2,0]] and [[1,0],[2,0],[0,0]]\n\"\"\"\nfrom collections import Counter\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform\n\n\nclass Solution:\n def numberOfBoomerangs1(self, points): # 612ms\n res = 0\n for p in points:\n d = {}\n for q in points:\n if q != p:\n dx = p[0] - q[0]\n dy = p[1] - q[1]\n dsq = dx * dx + dy * dy\n if dsq in d:\n res += d[dsq]\n d[dsq] += 1\n else:\n d[dsq] = 1\n \n return res * 2\n \n def numberOfBoomerangs2(self, points): # 592ms\n return sum(m * (m - 1) for x1, y1 in points for m in Counter((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) for x2, y2 in points).values())\n\n def numberOfBoomerangs3(self, points): # 312ms\n def dist_counter(dis):\n m = np.unique(dis, return_counts=True)[1]\n return np.sum(m * (m - 1))\n \n return sum(dist_counter(x) for x in squareform(pdist(np.array(points))))\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KumarLabJax/MouseSleep
[ "a0aaeb9a721a781bde13f2064a5291ab3ee80858" ]
[ "dataset-creation/ExportScaledSeg.py" ]
[ "import imageio\nimport cv2\nimport numpy as np\nimport os, sys\nimport argparse\n\noutput_order = ['m00','m10','m01','m20','m11','m02','m30','m21','m12','m03','mu20','mu11','mu02','mu30','mu21','mu12','mu03','nu20','nu11','nu02','nu30','nu21','nu12','nu03']\n\ndef writeCSVHeader(filename):\n\twriter = open(filename, 'w')\n\twriter.write(\"m00,m10,m01,m20,m11,m02,m30,m21,m12,m03,mu20,mu11,mu02,mu30,mu21,mu12,mu03,nu20,nu11,nu02,nu30,nu21,nu12,nu03,perimeter\\n\")\n\twriter.close()\n\tappend_writer = open(filename, 'a')\n\treturn append_writer\n\ndef process_video(args):\n\tvid_reader = imageio.get_reader(args.input_file)\n\tfull_writer = writeCSVHeader(os.path.splitext(args.input_file)[0] + '_DarkMask_' + str(args.frame_size) + '.csv')\n\tfor frame in vid_reader:\n\t\tframe = frame[:,:,0]\n\t\tframe = cv2.resize(frame, (args.frame_size, args.frame_size))\n\t\tmasked_full_frame = np.zeros_like(frame)\n\t\tmasked_full_frame[frame > 128] = 1\n\t\tmoments = cv2.moments(masked_full_frame)\n\t\tcontours, hierarchy = cv2.findContours(np.uint8(masked_full_frame), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\t\tif len(contours) < 1:\n\t\t\t# Default values\n\t\t\tmoments = {'m00': 0, 'm10': 0, 'm01': 0, 'm20': 0, 'm11': 0, 'm02': 0, 'm30': 0, 'm21': 0, 'm12': 0, 'm03': 0, 'mu20': 0, 'mu11': 0, 'mu02': 0, 'mu30': 0, 'mu21': 0, 'mu12': 0, 'mu03': 0, 'nu20': 0, 'nu11': 0, 'nu02': 0, 'nu30': 0, 'nu21': 0, 'nu12': 0, 'nu03': 0}\n\t\t\tperimeter = 0\n\t\telse:\n\t\t\tmax_contour = None\n\t\t\tmax_size = -1\n\t\t\tfor k in contours:\n\t\t\t\tblob_size = cv2.contourArea(k)\n\t\t\t\tif blob_size > max_size:\n\t\t\t\t\tmax_contour = k\n\t\t\t\t\tmax_size = blob_size\n\t\t\tperimeter = cv2.arcLength(max_contour, True)\n\t\tnp.savetxt(full_writer, [list([moments[x] for x in output_order]) + [perimeter]], delimiter=',')\n\n\tvid_reader.close()\n\tfull_writer.close()\n\t# cv2.imwrite('masked.png',masked_full_frame*254)\n\t# cv2.imwrite('frame.png',frame)\n\ndef main(argv):\n\tparser = argparse.ArgumentParser(description='Exports ')\n\tparser.add_argument('--input_file', help='Input dataset to process', required=True)\n\tparser.add_argument('--frame_size', help='Scaled frame size to use', default=1080, type=int)\n\targs = parser.parse_args()\n\tprocess_video(args)\n\nif __name__ == '__main__':\n\tmain(sys.argv[1:])\n" ]
[ [ "numpy.uint8", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andrewsoong/LISF
[ "20e3b00a72b6b348c567d0703550f290881679b4" ]
[ "ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/temporal_disaggregation_6hourly_module.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n# Author: Shrad Shukla\n# coding: utf-8\n#Author: Shrad Shukla\n#Usage: This is a module for the BCSD code.\n#This module bias corrects a forecasts following probability\n#mapping approach as described in Wood et al. 2002\n#Date: August 06, 2015\n# In[28]:\n\"\"\"\n\nfrom __future__ import division\n#import pandas as pdimport calendar\nimport os.path as op\nimport sys\nfrom datetime import datetime\nimport calendar\nfrom time import ctime as t_ctime\nfrom time import time as t_time\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\n#import time\n# pylint: disable=no-name-in-module\nfrom netCDF4 import Dataset as nc4_dataset\nfrom netCDF4 import date2num as nc4_date2num\n#import netCDF4 as nc\n# pylint: enable=no-name-in-module\nfrom Shrad_modules import read_nc_files, MAKEDIR\n\ndef write_bc_netcdf(outfile, var, varname, description, source, var_units, \\\nvar_standard_name, lons, lats, sdate, dates, sig_digit, north_east_corner_lat, \\\nnorth_east_corner_lon, south_west_corner_lat, south_west_corner_lon, \\\nresolution_x, resolution_y, time_increment):\n \"\"\"write netcdf\"\"\"\n rootgrp = nc4_dataset(outfile, 'w', format='NETCDF4_CLASSIC')\n time = rootgrp.createDimension('time', None)\n longitude = rootgrp.createDimension('lon', len(lons))\n latitude = rootgrp.createDimension('lat', len(lats))\n\n longitudes = rootgrp.createVariable('lon', 'f4', ('lon',))\n latitudes = rootgrp.createVariable('lat', 'f4', ('lat',))\n times = rootgrp.createVariable('time', 'f4', ('time', ))\n\n # two dimensions unlimited.\n varname = rootgrp.createVariable(varname, 'f4', ('time', 'lat', \\\n 'lon',), fill_value=-9999, zlib=True, \\\n least_significant_digit=sig_digit)\n rootgrp.missing_value = -9999\n rootgrp.description = description\n rootgrp.zenith_interp = \"true,false,\"\n rootgrp.MAP_PROJECTION = \"EQUIDISTANT CYLINDRICAL\"\n rootgrp.conventions = \"CF-1.6\"\n rootgrp.south_west_corner_lat = float(south_west_corner_lat)\n rootgrp.south_west_corner_lon = float(south_west_corner_lon)\n rootgrp.north_east_corner_lat = float(north_east_corner_lat)\n rootgrp.north_east_corner_lon = float(north_east_corner_lon)\n rootgrp.DX = resolution_x\n rootgrp.DY = resolution_y\n rootgrp.history = 'Created ' + t_ctime(t_time())\n rootgrp.source = source\n latitudes.units = 'degrees_north'\n longitudes.units = 'degrees_east'\n varname.units = var_units\n varname.standard_name = var_standard_name\n string_date = datetime.strftime(sdate, \"%Y-%m-%d %H:%M:%S\")\n times.units = 'minutes since ' + string_date\n times.time_increment = time_increment\n times.begin_date = datetime.strftime(sdate, \"%Y%m%d\")\n times.begin_time = '000000'\n times.calendar = 'gregorian'\n latitudes[:] = lats\n longitudes[:] = lons\n varname[:, :, :] = var\n times[:] = nc4_date2num(dates, units=times.units, calendar=times.calendar)\n rootgrp.close()\n\n## Usage: <Name of variable in observed climatology> <Name of variable in\n## reforecast climatology (same as the name in target forecast>\n## <forecast model number>\nCMDARGS = str(sys.argv)\nOBS_VAR = str(sys.argv[1]) ##\nFCST_VAR = str(sys.argv[2]) ##\nINIT_FCST_YEAR = int(sys.argv[3])\n## initial forecast year for which to downscale the data\nINIT_FCST_MON = int(sys.argv[4])\n## initial forecast month for which to downscale the data\nBC_VAR = str(sys.argv[5])\n## This is used to figure out if the variable a precipitation variable or not\nUNIT = str(sys.argv[6])\nLAT1, LAT2, LON1, LON2 = int(sys.argv[7]), int(sys.argv[8]), int(sys.argv[9]), 
int(sys.argv[10])\n\n\nMODEL_NAME = str(sys.argv[11])\nENS_NUM = int(sys.argv[12])\nLEAD_FINAL = int(sys.argv[13])\nMONTH_NAME_TEMPLATE = '{}01'\nMONTH_NAME = MONTH_NAME_TEMPLATE.format(calendar.month_abbr[INIT_FCST_MON])\n\nprint(f\"*** LEAD FINAL: {LEAD_FINAL}\")\nBC_FCST_SYR, BC_FCST_EYR = int(sys.argv[14]), int(sys.argv[15])\nif FCST_VAR == 'PRECTOT':\n MASK_FILE = str(sys.argv[16])\nelse:\n MASK_FILE = str(sys.argv[17])\n\nMASK = read_nc_files(MASK_FILE, 'mask')\nprint(f\"MASK: {MASK.shape}\")\n\nMONTHLY_BC_FCST_DIR = str(sys.argv[18])\nMONTHLY_RAW_FCST_DIR = str(sys.argv[19])\nSUBDAILY_RAW_FCST_DIR = str(sys.argv[20])\nBASE_OUTDIR = str(sys.argv[21])\nOUTDIR_TEMPLATE = '{}/{:04d}/ens{:01d}'\n\n\n# All file formats\nMONTHLY_BC_INFILE_TEMPLATE = '{}/{}.{}.{}_{:04d}_{:04d}.nc'\nMONTHLY_RAW_INFILE_TEMPLATE = '{}/{:04d}/ens{:01d}/{}.cfsv2.{:04d}{:02d}.nc'\nSUBDAILY_INFILE_TEMPLATE = '{}/{:04d}/ens{:01d}/{}.cfsv2.{:04d}{:02d}.nc'\nSUBDAILY_OUTFILE_TEMPLATE = '{}/{}.{:04d}{:02d}.nc4'\nMONTHLY_NMME_INFILE_TEMPLATE = '{}/{:04d}/ens{:01d}/{}.nmme.monthly.{:04d}{:02d}.nc'\n\nfor MON in [INIT_FCST_MON]:\n MONTH_NAME = MONTH_NAME_TEMPLATE.format((calendar.month_abbr[MON]).lower())\n ## This provides abbrevated version of the name of a month: (e.g. for\n ## January (i.e. Month number = 1) it will return \"Jan\"). The abbrevated\n ## name is used in the forecasts file name\n print(f\"Forecast Initialization month is {MONTH_NAME}\")\n ### First read bias corrected monthly forecast data\n BC_INFILE = MONTHLY_BC_INFILE_TEMPLATE.format(MONTHLY_BC_FCST_DIR,\\\n FCST_VAR, MODEL_NAME, MONTH_NAME, BC_FCST_SYR, BC_FCST_EYR)\n\n print(f\"Reading bias corrected monthly forecasts {BC_INFILE}\")\n LATS = read_nc_files(BC_INFILE, 'latitude')\n LONS = read_nc_files(BC_INFILE, 'longitude')\n MON_BC_DATA = read_nc_files(BC_INFILE, FCST_VAR)\n\n ## Shape of the above dataset time, Lead, Ens, latitude, longitude\n for ens in range(ENS_NUM):\n OUTDIR = OUTDIR_TEMPLATE.format(BASE_OUTDIR, INIT_FCST_YEAR, ens+1)\n if op.isdir(OUTDIR):\n pass\n else:\n MAKEDIR(OUTDIR)\n print(f\"OUTDIR is {OUTDIR}\")\n for LEAD_NUM in range(0, LEAD_FINAL): ## Loop from lead =0 to Final Lead\n FCST_DATE = datetime(INIT_FCST_YEAR, INIT_FCST_MON, 1, 6) + \\\n relativedelta(months=LEAD_NUM)\n FCST_YEAR, FCST_MONTH = FCST_DATE.year, FCST_DATE.month\n\n # Number of subdaily time steps in the target forecast month\n NUM_TIMESTEPS = 4*calendar.monthrange(FCST_YEAR, FCST_MONTH)[1]\n\n # Using number of days above to read input daily forecasts\n # and define array to store output file\n OUTFILE = SUBDAILY_OUTFILE_TEMPLATE.format(OUTDIR, OBS_VAR, \\\n FCST_YEAR, FCST_MONTH)\n OUTPUT_BC_DATA = np.ones((NUM_TIMESTEPS, len(LATS), len(LONS)))*-999\n # Monthly raw data\n if FCST_VAR != 'PRECTOT':\n MONTHLY_INFILE = MONTHLY_RAW_INFILE_TEMPLATE.format(\\\n MONTHLY_RAW_FCST_DIR, INIT_FCST_YEAR, ens+1, MONTH_NAME, \\\n FCST_YEAR, FCST_MONTH)\n else:\n print(\"Temporarily using nmme unique TEMPLATE. 
\\\n Recode in future.\")\n MONTHLY_INFILE = MONTHLY_NMME_INFILE_TEMPLATE.format(\\\n MONTHLY_RAW_FCST_DIR, INIT_FCST_YEAR, ens+1, MONTH_NAME, \\\n FCST_YEAR, FCST_MONTH)\n print(f\"Reading raw monthly forecast {MONTHLY_INFILE}\")\n MONTHLY_INPUT_RAW_DATA = read_nc_files(MONTHLY_INFILE, FCST_VAR)[0,]\n # Sub-Daily raw data\n SUBDAILY_INFILE = SUBDAILY_INFILE_TEMPLATE.format(\\\n SUBDAILY_RAW_FCST_DIR, INIT_FCST_YEAR, ens+1, MONTH_NAME, \\\n FCST_YEAR, FCST_MONTH)\n print(f\"Reading raw sub-daily forecast {SUBDAILY_INFILE}\")\n INPUT_RAW_DATA = read_nc_files(SUBDAILY_INFILE, FCST_VAR)\n print(f\"MONTHLY_BC_DATA: {MON_BC_DATA.shape}\")\n print(f\"MONTHLY_INPUT_RAW_DATA: {MONTHLY_INPUT_RAW_DATA}\")\n print(f\"INPUT_RAW_DATA: {INPUT_RAW_DATA.shape}\")\n print(f\"OUTPUT_BC_DATA: {OUTPUT_BC_DATA.shape}\")\n #for lat_num, LATS in enumerate(LATS):\n for lat_num, lat_value in enumerate(LATS):\n #for lon_num in LONS in enumerate(LONS):\n for lon_num, lon_value in enumerate(LONS):\n ## Only work with grid cells that are within the given mask\n if ((LAT1 <= LATS[lat_num]) and (LATS[lat_num] <= LAT2) \\\n and (LON1 <= LONS[lon_num]) and \\\n (LONS[lon_num] <= LON2)):\n # Bias corrected monthly value\n MON_BC_VALUE = MON_BC_DATA[(\\\n INIT_FCST_YEAR-BC_FCST_SYR), LEAD_NUM, ens, \\\n lat_num, lon_num]\n # Raw Monthly value\n MON_RAW_VALUE = MONTHLY_INPUT_RAW_DATA[lat_num, lon_num]\n if BC_VAR == 'PRCP':\n if MON_RAW_VALUE == 0:\n CORRECTION_FACTOR = MON_BC_VALUE\n ## HACK## for when input monthly value is 0\n OUTPUT_BC_DATA[:, lat_num, lon_num] = \\\n CORRECTION_FACTOR\n else:\n CORRECTION_FACTOR = MON_BC_VALUE/MON_RAW_VALUE\n OUTPUT_BC_DATA[:, lat_num, lon_num] = \\\n INPUT_RAW_DATA[:, lat_num, \\\n lon_num]*CORRECTION_FACTOR\n else:\n CORRECTION_FACTOR = MON_BC_VALUE-MON_RAW_VALUE\n OUTPUT_BC_DATA[:, lat_num, lon_num] = \\\n INPUT_RAW_DATA[:, lat_num, lon_num]+\\\n CORRECTION_FACTOR\n ### Finish correcting values for all timesteps in the given\n ### month and ensemble member\n print(f\"Now writing {OUTFILE}\")\n OUTPUT_BC_DATA = np.ma.masked_array(OUTPUT_BC_DATA, \\\n mask=OUTPUT_BC_DATA == -999)\n date = [FCST_DATE+relativedelta(hours=n*6) for n in \\\n range(NUM_TIMESTEPS)]\n write_bc_netcdf(OUTFILE, OUTPUT_BC_DATA, OBS_VAR, \\\n 'Bias corrected forecasts', 'MODEL:' + MODEL_NAME, UNIT, \\\n OBS_VAR, LONS, LATS, FCST_DATE, date, 5, 39.875, 59.875, -39.875, \\\n -19.875, 0.25, 0.25, 21600)\n" ]
[ [ "numpy.ma.masked_array" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
hvanwyk/atomic_data_uncertainties
[ "e6b376d600090203b20810c730a21021ea62ab44" ]
[ "recombination/make_plots_belike.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 22 15:43:22 2021\n\n@author: lochstu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\ndata1=pickle.load(open('be_like_0.4_1.6_b_mg.pkl','rb'))\ndata2=pickle.load(open('be_like_0.4_1.6_k_fe.pkl','rb'))\n\n\n\nb_rate_avg_pos=data1['be_like_b_pos']['rate_avg']\nb_rate_avg_neg=data1['be_like_b_neg']['rate_avg']\nb_rate_std_pos=data1['be_like_b_pos']['rate_std']\nb_rate_std_neg=data1['be_like_b_neg']['rate_std']\nb_rate_percent_pos=data1['be_like_b_pos']['rate_percent']\nb_rate_percent_neg=data1['be_like_b_neg']['rate_percent']\nb_rate_samples_pos=data1['be_like_b_pos']['rate_samples']\nb_rate_samples_neg=data1['be_like_b_neg']['rate_samples']\nb_T=data1['be_like_b_pos']['T']\nb_rate_samples_combined=np.concatenate((b_rate_samples_pos,b_rate_samples_neg))\nb_rate_avg_combined = np.average(b_rate_samples_combined,axis=0)\nb_rate_std_combined = np.std(b_rate_samples_combined,axis=0)\nb_rate_percent_combined = b_rate_std_combined/b_rate_avg_combined*100.\nb_atom='B'\nseq=data1['seq']\nb_T_ev=b_T/11604.\n\nc_rate_avg_pos=data1['be_like_c_pos']['rate_avg']\nc_rate_avg_neg=data1['be_like_c_neg']['rate_avg']\nc_rate_std_pos=data1['be_like_c_pos']['rate_std']\nc_rate_std_neg=data1['be_like_c_neg']['rate_std']\nc_rate_percent_pos=data1['be_like_c_pos']['rate_percent']\nc_rate_percent_neg=data1['be_like_c_neg']['rate_percent']\nc_rate_samples_pos=data1['be_like_c_pos']['rate_samples']\nc_rate_samples_neg=data1['be_like_c_neg']['rate_samples']\nc_T=data1['be_like_c_pos']['T']\nc_rate_samples_combined=np.concatenate((c_rate_samples_pos,c_rate_samples_neg))\nc_rate_avg_combined = np.average(c_rate_samples_combined,axis=0)\nc_rate_std_combined = np.std(c_rate_samples_combined,axis=0)\nc_rate_percent_combined = c_rate_std_combined/c_rate_avg_combined*100.\nc_atom='C'\nc_T_ev=c_T/11604.\n\nn_rate_avg_pos=data1['be_like_n_pos']['rate_avg']\nn_rate_avg_neg=data1['be_like_n_neg']['rate_avg']\nn_rate_std_pos=data1['be_like_n_pos']['rate_std']\nn_rate_std_neg=data1['be_like_n_neg']['rate_std']\nn_rate_percent_pos=data1['be_like_n_pos']['rate_percent']\nn_rate_percent_neg=data1['be_like_n_neg']['rate_percent']\nn_rate_samples_pos=data1['be_like_n_pos']['rate_samples']\nn_rate_samples_neg=data1['be_like_n_neg']['rate_samples']\nn_T=data1['be_like_n_pos']['T']\nn_rate_samples_combined=np.concatenate((n_rate_samples_pos,n_rate_samples_neg))\nn_rate_avg_combined = np.average(n_rate_samples_combined,axis=0)\nn_rate_std_combined = np.std(n_rate_samples_combined,axis=0)\nn_rate_percent_combined = n_rate_std_combined/n_rate_avg_combined*100.\nn_atom='N'\nseq=data1['seq']\nn_T_ev=n_T/11604.\n\no_rate_avg_pos=data1['be_like_o_pos']['rate_avg']\no_rate_avg_neg=data1['be_like_o_neg']['rate_avg']\no_rate_std_pos=data1['be_like_o_pos']['rate_std']\no_rate_std_neg=data1['be_like_o_neg']['rate_std']\no_rate_percent_pos=data1['be_like_o_pos']['rate_percent']\no_rate_percent_neg=data1['be_like_o_neg']['rate_percent']\no_rate_samples_pos=data1['be_like_o_pos']['rate_samples']\no_rate_samples_neg=data1['be_like_o_neg']['rate_samples']\no_T=data1['be_like_o_pos']['T']\no_rate_samples_combined=np.concatenate((o_rate_samples_pos,o_rate_samples_neg))\no_rate_avg_combined = np.average(o_rate_samples_combined,axis=0)\no_rate_std_combined = np.std(o_rate_samples_combined,axis=0)\no_rate_percent_combined = 
o_rate_std_combined/o_rate_avg_combined*100.\no_atom='O'\no_T_ev=o_T/11604.\n\n\nf_rate_avg_pos=data1['be_like_f_pos']['rate_avg']\nf_rate_avg_neg=data1['be_like_f_neg']['rate_avg']\nf_rate_std_pos=data1['be_like_f_pos']['rate_std']\nf_rate_std_neg=data1['be_like_f_neg']['rate_std']\nf_rate_percent_pos=data1['be_like_f_pos']['rate_percent']\nf_rate_percent_neg=data1['be_like_f_neg']['rate_percent']\nf_rate_samples_pos=data1['be_like_f_pos']['rate_samples']\nf_rate_samples_neg=data1['be_like_f_neg']['rate_samples']\nf_T=data1['be_like_f_pos']['T']\nf_rate_samples_combined=np.concatenate((f_rate_samples_pos,f_rate_samples_neg))\nf_rate_avg_combined = np.average(f_rate_samples_combined,axis=0)\nf_rate_std_combined = np.std(f_rate_samples_combined,axis=0)\nf_rate_percent_combined = f_rate_std_combined/f_rate_avg_combined*100.\nf_atom='F'\nf_T_ev=f_T/11604.\n\nne_rate_avg_pos=data1['be_like_ne_pos']['rate_avg']\nne_rate_avg_neg=data1['be_like_ne_neg']['rate_avg']\nne_rate_std_pos=data1['be_like_ne_pos']['rate_std']\nne_rate_std_neg=data1['be_like_ne_neg']['rate_std']\nne_rate_percent_pos=data1['be_like_ne_pos']['rate_percent']\nne_rate_percent_neg=data1['be_like_ne_neg']['rate_percent']\nne_rate_samples_pos=data1['be_like_ne_pos']['rate_samples']\nne_rate_samples_neg=data1['be_like_ne_neg']['rate_samples']\nne_T=data1['be_like_ne_pos']['T']\nne_rate_samples_combined=np.concatenate((ne_rate_samples_pos,ne_rate_samples_neg))\nne_rate_avg_combined = np.average(ne_rate_samples_combined,axis=0)\nne_rate_std_combined = np.std(ne_rate_samples_combined,axis=0)\nne_rate_percent_combined = ne_rate_std_combined/ne_rate_avg_combined*100.\nne_atom='Ne'\nne_T_ev=ne_T/11604.\n\nna_rate_avg_pos=data1['be_like_na_pos']['rate_avg']\nna_rate_avg_neg=data1['be_like_na_neg']['rate_avg']\nna_rate_std_pos=data1['be_like_na_pos']['rate_std']\nna_rate_std_neg=data1['be_like_na_neg']['rate_std']\nna_rate_percent_pos=data1['be_like_na_pos']['rate_percent']\nna_rate_percent_neg=data1['be_like_na_neg']['rate_percent']\nna_rate_samples_pos=data1['be_like_na_pos']['rate_samples']\nna_rate_samples_neg=data1['be_like_na_neg']['rate_samples']\nna_T=data1['be_like_na_pos']['T']\nna_rate_samples_combined=np.concatenate((na_rate_samples_pos,na_rate_samples_neg))\nna_rate_avg_combined = np.average(na_rate_samples_combined,axis=0)\nna_rate_std_combined = np.std(na_rate_samples_combined,axis=0)\nna_rate_percent_combined = na_rate_std_combined/na_rate_avg_combined*100.\nna_atom='Na'\nna_T_ev=na_T/11604.\n\nmg_rate_avg_pos=data1['be_like_mg_pos']['rate_avg']\nmg_rate_avg_neg=data1['be_like_mg_neg']['rate_avg']\nmg_rate_std_pos=data1['be_like_mg_pos']['rate_std']\nmg_rate_std_neg=data1['be_like_mg_neg']['rate_std']\nmg_rate_percent_pos=data1['be_like_mg_pos']['rate_percent']\nmg_rate_percent_neg=data1['be_like_mg_neg']['rate_percent']\nmg_rate_samples_pos=data1['be_like_mg_pos']['rate_samples']\nmg_rate_samples_neg=data1['be_like_mg_neg']['rate_samples']\nmg_T=data1['be_like_mg_pos']['T']\nmg_rate_samples_combined=np.concatenate((mg_rate_samples_pos,mg_rate_samples_neg))\nmg_rate_avg_combined = np.average(mg_rate_samples_combined,axis=0)\nmg_rate_std_combined = np.std(mg_rate_samples_combined,axis=0)\nmg_rate_percent_combined = 
mg_rate_std_combined/mg_rate_avg_combined*100.\nmg_atom='Mg'\nmg_T_ev=mg_T/11604.\n\nk_rate_avg_pos=data2['be_like_k_pos']['rate_avg']\nk_rate_avg_neg=data2['be_like_k_neg']['rate_avg']\nk_rate_std_pos=data2['be_like_k_pos']['rate_std']\nk_rate_std_neg=data2['be_like_k_neg']['rate_std']\nk_rate_percent_pos=data2['be_like_k_pos']['rate_percent']\nk_rate_percent_neg=data2['be_like_k_neg']['rate_percent']\nk_rate_samples_pos=data2['be_like_k_pos']['rate_samples']\nk_rate_samples_neg=data2['be_like_k_neg']['rate_samples']\nk_T=data2['be_like_k_pos']['T']\nk_rate_samples_combined=np.concatenate((k_rate_samples_pos,k_rate_samples_neg))\nk_rate_avg_combined = np.average(k_rate_samples_combined,axis=0)\nk_rate_std_combined = np.std(k_rate_samples_combined,axis=0)\nk_rate_percent_combined = k_rate_std_combined/k_rate_avg_combined*100.\nk_atom='K'\nk_T_ev=k_T/11604.\n\nca_rate_avg_pos=data2['be_like_ca_pos']['rate_avg']\nca_rate_avg_neg=data2['be_like_ca_neg']['rate_avg']\nca_rate_std_pos=data2['be_like_ca_pos']['rate_std']\nca_rate_std_neg=data2['be_like_ca_neg']['rate_std']\nca_rate_percent_pos=data2['be_like_ca_pos']['rate_percent']\nca_rate_percent_neg=data2['be_like_ca_neg']['rate_percent']\nca_rate_samples_pos=data2['be_like_ca_pos']['rate_samples']\nca_rate_samples_neg=data2['be_like_ca_neg']['rate_samples']\nca_T=data2['be_like_ca_pos']['T']\nca_rate_samples_combined=np.concatenate((ca_rate_samples_pos,ca_rate_samples_neg))\nca_rate_avg_combined = np.average(ca_rate_samples_combined,axis=0)\nca_rate_std_combined = np.std(ca_rate_samples_combined,axis=0)\nca_rate_percent_combined = ca_rate_std_combined/ca_rate_avg_combined*100.\nca_atom='Ca'\nca_T_ev=ca_T/11604.\n\nsc_rate_avg_pos=data2['be_like_sc_pos']['rate_avg']\nsc_rate_avg_neg=data2['be_like_sc_neg']['rate_avg']\nsc_rate_std_pos=data2['be_like_sc_pos']['rate_std']\nsc_rate_std_neg=data2['be_like_sc_neg']['rate_std']\nsc_rate_percent_pos=data2['be_like_sc_pos']['rate_percent']\nsc_rate_percent_neg=data2['be_like_sc_neg']['rate_percent']\nsc_rate_samples_pos=data2['be_like_sc_pos']['rate_samples']\nsc_rate_samples_neg=data2['be_like_sc_neg']['rate_samples']\nsc_T=data2['be_like_sc_pos']['T']\nsc_rate_samples_combined=np.concatenate((sc_rate_samples_pos,sc_rate_samples_neg))\nsc_rate_avg_combined = np.average(sc_rate_samples_combined,axis=0)\nsc_rate_std_combined = np.std(sc_rate_samples_combined,axis=0)\nsc_rate_percent_combined = sc_rate_std_combined/sc_rate_avg_combined*100.\nsc_atom='Sc'\nsc_T_ev=sc_T/11604.\n\nti_rate_avg_pos=data2['be_like_ti_pos']['rate_avg']\nti_rate_avg_neg=data2['be_like_ti_neg']['rate_avg']\nti_rate_std_pos=data2['be_like_ti_pos']['rate_std']\nti_rate_std_neg=data2['be_like_ti_neg']['rate_std']\nti_rate_percent_pos=data2['be_like_ti_pos']['rate_percent']\nti_rate_percent_neg=data2['be_like_ti_neg']['rate_percent']\nti_rate_samples_pos=data2['be_like_ti_pos']['rate_samples']\nti_rate_samples_neg=data2['be_like_ti_neg']['rate_samples']\nti_T=data2['be_like_ti_pos']['T']\nti_rate_samples_combined=np.concatenate((ti_rate_samples_pos,ti_rate_samples_neg))\nti_rate_avg_combined = np.average(ti_rate_samples_combined,axis=0)\nti_rate_std_combined = np.std(ti_rate_samples_combined,axis=0)\nti_rate_percent_combined = 
ti_rate_std_combined/ti_rate_avg_combined*100.\nti_atom='Ti'\nti_T_ev=ti_T/11604.\n\nv_rate_avg_pos=data2['be_like_v_pos']['rate_avg']\nv_rate_avg_neg=data2['be_like_v_neg']['rate_avg']\nv_rate_std_pos=data2['be_like_v_pos']['rate_std']\nv_rate_std_neg=data2['be_like_v_neg']['rate_std']\nv_rate_percent_pos=data2['be_like_v_pos']['rate_percent']\nv_rate_percent_neg=data2['be_like_v_neg']['rate_percent']\nv_rate_samples_pos=data2['be_like_v_pos']['rate_samples']\nv_rate_samples_neg=data2['be_like_v_neg']['rate_samples']\nv_T=data2['be_like_v_pos']['T']\nv_rate_samples_combined=np.concatenate((v_rate_samples_pos,v_rate_samples_neg))\nv_rate_avg_combined = np.average(v_rate_samples_combined,axis=0)\nv_rate_std_combined = np.std(v_rate_samples_combined,axis=0)\nv_rate_percent_combined = v_rate_std_combined/v_rate_avg_combined*100.\nv_atom='V'\nv_T_ev=v_T/11604.\n\ncr_rate_avg_pos=data2['be_like_cr_pos']['rate_avg']\ncr_rate_avg_neg=data2['be_like_cr_neg']['rate_avg']\ncr_rate_std_pos=data2['be_like_cr_pos']['rate_std']\ncr_rate_std_neg=data2['be_like_cr_neg']['rate_std']\ncr_rate_percent_pos=data2['be_like_cr_pos']['rate_percent']\ncr_rate_percent_neg=data2['be_like_cr_neg']['rate_percent']\ncr_rate_samples_pos=data2['be_like_cr_pos']['rate_samples']\ncr_rate_samples_neg=data2['be_like_cr_neg']['rate_samples']\ncr_T=data2['be_like_cr_pos']['T']\ncr_rate_samples_combined=np.concatenate((cr_rate_samples_pos,cr_rate_samples_neg))\ncr_rate_avg_combined = np.average(cr_rate_samples_combined,axis=0)\ncr_rate_std_combined = np.std(cr_rate_samples_combined,axis=0)\ncr_rate_percent_combined = cr_rate_std_combined/cr_rate_avg_combined*100.\ncr_atom='Cr'\ncr_T_ev=cr_T/11604.\n\nmn_rate_avg_pos=data2['be_like_mn_pos']['rate_avg']\nmn_rate_avg_neg=data2['be_like_mn_neg']['rate_avg']\nmn_rate_std_pos=data2['be_like_mn_pos']['rate_std']\nmn_rate_std_neg=data2['be_like_mn_neg']['rate_std']\nmn_rate_percent_pos=data2['be_like_mn_pos']['rate_percent']\nmn_rate_percent_neg=data2['be_like_mn_neg']['rate_percent']\nmn_rate_samples_pos=data2['be_like_mn_pos']['rate_samples']\nmn_rate_samples_neg=data2['be_like_mn_neg']['rate_samples']\nmn_T=data2['be_like_mn_pos']['T']\nmn_rate_samples_combined=np.concatenate((mn_rate_samples_pos,mn_rate_samples_neg))\nmn_rate_avg_combined = np.average(mn_rate_samples_combined,axis=0)\nmn_rate_std_combined = np.std(mn_rate_samples_combined,axis=0)\nmn_rate_percent_combined = mn_rate_std_combined/mn_rate_avg_combined*100.\nmn_atom='Mn'\nmn_T_ev=mn_T/11604.\n\nfe_rate_avg_pos=data2['be_like_fe_pos']['rate_avg']\nfe_rate_avg_neg=data2['be_like_fe_neg']['rate_avg']\nfe_rate_std_pos=data2['be_like_fe_pos']['rate_std']\nfe_rate_std_neg=data2['be_like_fe_neg']['rate_std']\nfe_rate_percent_pos=data2['be_like_fe_pos']['rate_percent']\nfe_rate_percent_neg=data2['be_like_fe_neg']['rate_percent']\nfe_rate_samples_pos=data2['be_like_fe_pos']['rate_samples']\nfe_rate_samples_neg=data2['be_like_fe_neg']['rate_samples']\nfe_T=data2['be_like_fe_pos']['T']\nfe_rate_samples_combined=np.concatenate((fe_rate_samples_pos,fe_rate_samples_neg))\nfe_rate_avg_combined = np.average(fe_rate_samples_combined,axis=0)\nfe_rate_std_combined = np.std(fe_rate_samples_combined,axis=0)\nfe_rate_percent_combined = fe_rate_std_combined/fe_rate_avg_combined*100.\nfe_atom='Fe'\nfe_T_ev=fe_T/11604.\n\n\nfig3, axs3 = plt.subplots(1,1)\ntitle_pos='Uncertainty coefficient for ' + seq + '-like' \naxs3.set_xscale(\"log\")\naxs3.set_yscale(\"log\")\naxs3.set_xlabel('Electron Temperature (K)')\naxs3.set_ylabel('Percentage 
uncertainty')\naxs3.set_title(title_pos)\naxs3.plot(b_T,b_rate_percent_combined,label=b_atom)\naxs3.plot(c_T,c_rate_percent_combined,label=c_atom)\naxs3.plot(n_T,n_rate_percent_combined,label=n_atom)\naxs3.plot(o_T,o_rate_percent_combined,label=o_atom)\naxs3.plot(f_T,f_rate_percent_combined,label=f_atom)\naxs3.plot(ne_T,ne_rate_percent_combined,label=ne_atom)\naxs3.plot(na_T,na_rate_percent_combined,label=na_atom)\naxs3.plot(mg_T,mg_rate_percent_combined,label=mg_atom)\naxs3.plot(k_T,k_rate_percent_combined,label=k_atom)\naxs3.plot(ca_T,ca_rate_percent_combined,label=ca_atom)\naxs3.plot(sc_T,sc_rate_percent_combined,label=sc_atom)\naxs3.plot(ti_T,ti_rate_percent_combined,label=ti_atom)\naxs3.plot(v_T,v_rate_percent_combined,label=v_atom)\naxs3.plot(cr_T,cr_rate_percent_combined,label=cr_atom)\naxs3.plot(mn_T,mn_rate_percent_combined,label=mn_atom)\naxs3.plot(fe_T,fe_rate_percent_combined,label=fe_atom)\naxs3.legend()\n\n\n" ]
[ [ "numpy.concatenate", "numpy.std", "numpy.average", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeoscienceAustralia/wistl
[ "fa8b6aeaabb902ea72085b3552b5167cd20040a4" ]
[ "wistl/tests/test_line.py" ]
[ "\nimport unittest\nimport os\nimport numpy as np\nimport copy\nimport logging\nimport pandas as pd\nfrom scipy.stats import itemfreq\n\nfrom wistl.config import Config\nfrom wistl.line import Line, adjust_value_to_line, adjust_index_to_line\nfrom wistl.tests.test_config import assertDeepAlmostEqual\nfrom wistl.tests.test_tower import create_wind_given_bearing\n# from wistl.transmission_network import read_shape_file, populate_df_lines, \\\n# populate_df_towers\n\n#ATOL = 0.0005\n#RTOL = 0.01\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestLine1(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n logging.basicConfig(level=logging.ERROR)\n cls.logger = logging.getLogger(__name__)\n\n cls.cfg = Config(os.path.join(BASE_DIR, 'test.cfg'), logger=cls.logger)\n\n event_name = 'test1'\n event_scale = 3.0\n path_event = os.path.join(cls.cfg.path_wind_event_base,\n event_name)\n path_output = os.path.join(cls.cfg.path_output, 'test1_s3.0')\n cls.no_sims = 500000\n\n # LineB\n dic_line = cls.cfg.lines['LineB'].copy()\n dic_line.update({'name': 'LineB',\n 'no_sims': cls.no_sims,\n 'damage_states': cls.cfg.damage_states,\n 'non_collapse': cls.cfg.non_collapse,\n 'event_id': cls.cfg.event_id_format.format(event_name=event_name, scale=event_scale),\n 'scale': event_scale,\n 'rtol': cls.cfg.rtol,\n 'atol': cls.cfg.atol,\n 'dmg_threshold': cls.cfg.dmg_threshold,\n 'rnd_state': np.random.RandomState(0),\n 'path_event': path_event,\n 'path_output': path_output,\n 'dic_towers': cls.cfg.towers_by_line['LineB']})\n\n cls.line = Line(**dic_line)\n\n for _, tower in cls.line.towers.items():\n tower.init()\n tower._wind = create_wind_given_bearing(10.0, 1.0)\n tower.axisaz = 11.0\n\n @classmethod\n def tearDown(cls):\n try:\n os.remove(cls.line.file_output)\n os.removedirs(cls.line.output_path)\n except:\n pass\n\n def test_repr(self):\n expected = f'Line(name=LineB, no_towers=22, event_id=test1_s3.0)'\n self.assertEqual(repr(self.line), expected)\n\n def test_towers(self):\n\n self.assertEqual(self.line.no_towers, 22)\n self.assertEqual(self.line.dmg_time_idx, (0, 2))\n #print(self.line.time)\n #self.assertEqual(self.line.no_time, 3)\n\n self.assertEqual(self.line.towers[0].name, 'T23')\n self.assertEqual(self.line.towers[0].idl, 0)\n self.assertEqual(self.line.towers[0].idn, 'T23')\n self.assertEqual(self.line.towers[0].no_time, 2)\n self.assertEqual(self.line.towers[0].no_sims, self.no_sims)\n self.assertEqual(self.line.towers[0].damage_states, ['minor', 'collapse'])\n self.assertAlmostEqual(self.line.towers[0].scale, 3.0)\n\n def test_time(self):\n\n pd.testing.assert_index_equal(self.line.time, self.line.towers[0].wind.index[0:2])\n\n def test_no_time(self):\n\n self.assertEqual(self.line.no_time, 2)\n\n def test_adjust_value_to_line1(self):\n\n line_dmg_time_idx = (0, 3)\n tower_dmg_time_idx = (0, 2)\n prob = [1, 2]\n result = adjust_value_to_line(line_dmg_time_idx,\n tower_dmg_time_idx, prob)\n expected = np.array([1, 2, 0])\n np.testing.assert_equal(result, expected)\n\n def test_adjust_value_to_line2(self):\n\n line_dmg_time_idx = (1870, 1879)\n prob = [1, 2, 3, 4]\n tower_dmg_time_idx = (1874, 1878)\n result = adjust_value_to_line(line_dmg_time_idx,\n tower_dmg_time_idx, prob)\n expected = np.array([0, 0, 0, 0, 1, 2, 3, 4, 0])\n np.testing.assert_equal(result, expected)\n\n def test_adjust_index_to_line1(self):\n\n line_dmg_time_idx = (0, 3)\n tower_dmg_time_idx = (0, 2)\n prob = np.array([0, 1, 1, 0])\n idt0, idx = adjust_index_to_line(line_dmg_time_idx,\n 
tower_dmg_time_idx, prob)\n self.assertEqual(idt0, 0)\n expected = np.ones_like(prob, dtype=bool)\n np.testing.assert_equal(idx, expected)\n\n def test_adjust_index_to_line2(self):\n\n line_dmg_time_idx = (1870, 1879)\n prob = np.array([0, 1, 2, 3, 0, 1, 2, 3])\n tower_dmg_time_idx = (1874, 1878)\n idt0, idx = adjust_index_to_line(line_dmg_time_idx,\n tower_dmg_time_idx, prob)\n self.assertEqual(idt0, -4)\n expected = np.ones_like(prob, dtype=bool)\n np.testing.assert_equal(idx, expected)\n\n def test_damage_prob(self):\n name = 'T26'\n # for name, tower in self.line1.towers.items():\n self.line._damage_prob = None\n\n tower = self.line.towers[3]\n #tower._dmg = None\n #tower._dmg_sim = None\n self.assertEqual(tower.name, name)\n # T26 (in the middle)\n # o----o----x----o----o\n # collapse\n # lognorm.cdf(1.0, 0.03, scale=1.05)\n p0c = 0.0519\n p0gn2 = p0c * 0.125\n p0gn1 = p0c * 0.575\n p0gp1 = p0c * 0.575\n p0gp2 = p0c * 0.125\n pc = 1 - (1-p0c)*(1-p0gn2)*(1-p0gn1)*(1-p0gp1)*(1-p0gp2)\n\n self.assertAlmostEqual(p0c, tower.dmg['collapse'][0], places=3)\n #self.assertAlmostEqual(p0c, tower.dmg_sim['collapse'][0], places=3)\n\n self.assertAlmostEqual(p0c, self.line.towers[1].dmg['collapse'][0], places=3)\n self.assertAlmostEqual(p0c, self.line.towers[2].dmg['collapse'][0], places=3)\n self.assertAlmostEqual(p0c, self.line.towers[4].dmg['collapse'][0], places=3)\n self.assertAlmostEqual(p0c, self.line.towers[5].dmg['collapse'][0], places=3)\n\n self.assertAlmostEqual(p0gn2, tower.collapse_adj[1][0], places=3)\n self.assertAlmostEqual(p0gn1, tower.collapse_adj[2][0], places=3)\n self.assertAlmostEqual(p0gp1, tower.collapse_adj[4][0], places=3)\n self.assertAlmostEqual(p0gp2, tower.collapse_adj[5][0], places=3)\n self.line.compute_damage_prob()\n self.assertAlmostEqual(\n pc, self.line.damage_prob['collapse'][name][0], places=3)\n\n # T26 (in the middle)\n # o----o----x----o----o\n # minor\n # lognorm.cdf(1.0, 0.03, scale=1.02)\n p0m = 0.16105\n pm = min(p0m - p0c + pc, 1.0)\n\n self.assertAlmostEqual(p0m, tower.dmg['minor'][0], places=3)\n #self.assertAlmostEqual(p0m, tower.dmg_sim['minor'][0], places=3)\n self.assertAlmostEqual(\n pm, self.line.damage_prob['minor'][name][0], places=3)\n\n\n def test_compute_damage_prob_sim(self):\n\n name = 'T26'\n\n tower = self.line.towers[3]\n #tower._dmg = None\n #tower._dmg_id_sim = None\n #tower._dmg_sim = None\n self.assertEqual(tower.name, name)\n # T26 (in the middle)\n # o----o----x----o----o\n # collapse\n # lognorm.cdf(1.0, 0.03, scale=1.05)\n p0c = 0.0519\n p0gn2 = p0c * 0.125\n p0gn1 = p0c * 0.575\n p0gp1 = p0c * 0.575\n p0gp2 = p0c * 0.125\n pc = 1 - (1-p0c)*(1-p0gn2)*(1-p0gn1)*(1-p0gp1)*(1-p0gp2)\n\n with self.assertLogs('wistl', level='INFO') as cm:\n self.line.compute_damage_prob()\n self.line.compute_damage_prob_sim()\n\n try:\n np.testing.assert_allclose(\n pc, self.line.damage_prob_sim['collapse'][name][0], atol=self.cfg.atol, rtol=self.cfg.rtol)\n except AssertionError:\n self.logger.warning(\n f'P(C) Theory: {pc:.4f}, '\n f\"Analytical: {self.line.damage_prob['collapse'][name][0]:.4f}, \"\n f\"Simulation: {self.line.damage_prob_sim['collapse'][name][0]:.4f}\")\n\n for _id in range(23, 45):\n name = f'T{_id}'\n try:\n np.testing.assert_allclose(self.line.damage_prob['collapse'][name][0],\n self.line.damage_prob_sim['collapse'][name][0], atol=self.cfg.atol, rtol=self.cfg.rtol)\n except AssertionError:\n self.logger.warning(\n f'Tower: {name}, collapse '\n f\"Analytical: {self.line.damage_prob['collapse'][name][0]:.4f}, \"\n 
f\"Simulation: {self.line.damage_prob_sim['collapse'][name][0]:.4f}\")\n\n # o----o----x----o----o\n # minor\n # lognorm.cdf(1.0, 0.02, scale=1.02) \n p0m = 0.16105\n pm = min(p0m - p0c + pc, 1.0)\n try:\n self.assertTrue(\n pm >= self.line.damage_prob_sim['minor'][name][0])\n except AssertionError:\n self.logger.warning(\n f'P(m) Theory: {pm:.4f}, '\n f\"Analytical: {self.line.damage_prob['minor'][name][0]:.4f}, \"\n f\"Simulation: {self.line.damage_prob_sim['minor'][name][0]:.4f}\")\n\n # except 32, strainer tower\n for _id in list(range(23, 32)) + list(range(33, 45)):\n name = f'T{_id}'\n try:\n self.assertTrue(self.line.damage_prob['minor'][name][0]\n >= self.line.damage_prob_sim['minor'][name][0])\n except AssertionError:\n self.logger.warning(\n f'Tower: {name}, minor, '\n f\"Analytical: {self.line.damage_prob['minor'][name][0]:.4f}, \"\n f\"Simulation: {self.line.damage_prob_sim['minor'][name][0]:.4f}\")\n\n def test_compute_damage_prob_sim_no_cascading(self):\n\n name = 'T26'\n\n tower = self.line.towers[3]\n #tower._dmg = None\n #tower._dmg_id_sim = None\n #tower._dmg_sim = None\n self.assertEqual(tower.name, name)\n # T26 (in the middle)\n # o----o----x----o----o\n # collapse\n # lognorm.cdf(1.0, 0.03, scale=1.05)\n pc = tower.dmg['collapse']\n self.line.compute_damage_prob_sim_no_cascading()\n try:\n np.testing.assert_allclose(\n pc, self.line.damage_prob_sim_no_cascading['collapse'][name], atol=self.cfg.atol, rtol=self.cfg.rtol)\n except AssertionError:\n self.logger.warning(\n f'P(C) Theory: {pc[0]}, '\n f\"Simulation: {self.line.damage_prob_sim_no_cascading['collapse'][name][0]}\")\n\n # o----o----x----o----o\n # minor\n # lognorm.cdf(1.0, 0.02, scale=1.02) \n pm = tower.dmg['minor']\n try:\n np.testing.assert_allclose(\n pm, self.line.damage_prob_sim_no_cascading['minor'][name], atol=self.cfg.atol, rtol=self.cfg.rtol)\n except AssertionError:\n self.logger.warning(\n f'P(m) Theory: {pm.values}, '\n f\"Simulation: {self.line.damage_prob_sim_no_cascading['minor'][name].values}\")\n\n # except 32, strainer tower\n for _id in self.line.dmg_towers:\n name = self.line.towers[_id].name\n idt0, idt1 = self.line.towers[_id].dmg_time_idx\n try:\n np.testing.assert_allclose(\n self.line.towers[_id].dmg['minor'], self.line.damage_prob_sim_no_cascading['minor'].iloc[idt0:idt1][name], atol=self.cfg.atol, rtol=self.cfg.rtol)\n except AssertionError:\n\n self.logger.warning(\n f'Tower: {name}, minor, '\n f\"Theory: {self.line.towers[_id].dmg['minor'].values}, \"\n f\"Simulation: {self.line.damage_prob_sim_no_cascading['minor'][name].values}\")\n\n def test_compute_stats(self):\n\n no_sims = 10\n rnd_state = np.random.RandomState(1)\n event_name = 'test1'\n path_event = os.path.join(self.cfg.path_wind_event_base,\n event_name)\n\n # LineB\n dic_line = self.cfg.lines['LineB'].copy()\n dic_line.update({'name': 'LineB',\n 'no_sims': no_sims,\n 'damage_states': self.cfg.damage_states,\n 'non_collapse': self.cfg.non_collapse,\n 'event_name': event_name,\n 'scale': 1.0,\n 'rtol': self.cfg.rtol,\n 'atol': self.cfg.atol,\n 'dmg_threshold': self.cfg.dmg_threshold,\n 'rnd_state': rnd_state,\n 'path_event': path_event,\n 'dic_towers': self.cfg.towers_by_line['LineB']})\n\n line = Line(**dic_line)\n\n for _, tower in line.towers.items():\n tower.init()\n tower._wind = create_wind_given_bearing(10.0, 1.0)\n tower.axisaz = 11.0\n\n with self.assertLogs('wistl', level='INFO') as cm:\n tf_ds = np.zeros((line.no_towers, no_sims, line.no_time))\n tf_ds[:line.no_towers, 0:5, 0] = 1\n tf_ds[:line.no_towers, 0, 
1] = 1\n\n tf_ds_minor = np.zeros_like(tf_ds)\n tf_ds_minor[:line.no_towers, 0:8, 0] = 1\n tf_ds_minor[:line.no_towers, 0:5, 1] = 1\n\n tf_sim = {'minor': tf_ds_minor, 'collapse': tf_ds}\n\n est_no_tower, prob_no_tower = line.compute_stats(tf_sim)\n\n # 22 * 0.5 + 0 * 0.5\n self.assertAlmostEqual(est_no_tower['collapse']['mean'][0], 11.0)\n # np.sqrt(22*22*0.5-11**2)\n self.assertAlmostEqual(est_no_tower['collapse']['std'][0], 11.0)\n\n # 22 * 0.1\n self.assertAlmostEqual(est_no_tower['collapse']['mean'][1], 2.2)\n # np.sqrt(22*22*0.1-2.2**2)\n self.assertAlmostEqual(est_no_tower['collapse']['std'][1], 6.6)\n\n # 22 * 0.3 + 0 * 0.7\n self.assertAlmostEqual(est_no_tower['minor']['mean'][0], 6.6)\n # np.sqrt(22*22*0.3-6.6**2)\n self.assertAlmostEqual(est_no_tower['minor']['std'][0], 10.082, places=3)\n\n # 22 * 0.4\n self.assertAlmostEqual(est_no_tower['minor']['mean'][1], 8.8)\n # np.sqrt(22*22*0.4-8.8**2)\n self.assertAlmostEqual(est_no_tower['minor']['std'][1], 10.778, places=2)\n\n @unittest.skip(\"Not efficient\")\n def test_compute_stats_given_timestamp(self):\n\n no_sims = 10\n rnd_state = np.random.RandomState(1)\n event_name = 'test1'\n path_event = os.path.join(self.cfg.path_wind_event_base,\n event_name)\n\n # LineB\n dic_line = self.cfg.lines['LineB'].copy()\n dic_line.update({'name': 'LineB',\n 'no_sims': no_sims,\n 'damage_states': self.cfg.damage_states,\n 'non_collapse': self.cfg.non_collapse,\n 'rtol': cls.cfg.rtol,\n 'atol': cls.cfg.atol,\n 'dmg_threshold': cls.cfg.dmg_threshold,\n 'event_name': event_name,\n 'scale': 1.0,\n 'rnd_state': rnd_state,\n 'path_event': path_event,\n 'dic_towers': self.cfg.towers_by_line['LineB']})\n\n line = Line(**dic_line)\n\n tf_ds = np.zeros((line.no_towers, no_sims))\n tf_ds[:line.no_towers, 0:5] = 1\n #tf_ds[:line.no_towers, 0] = 1\n\n tf_ds_minor = np.zeros_like(tf_ds)\n tf_ds_minor[:line.no_towers, 0:8] = 1\n #tf_ds_minor[:line.no_towers, 0:5] = 1\n\n tf_sim = {'minor': tf_ds_minor, 'collapse': tf_ds}\n\n prob_no_tower = line.compute_stats_given_timestamp(tf_sim)\n\n # collapse \n prob = np.zeros((line.no_towers + 1))\n prob[0] = 0.5\n prob[-1] = 0.5\n np.testing.assert_almost_equal(prob_no_tower['collapse'], prob)\n\n # minor\n prob = np.zeros((line.no_towers + 1))\n prob[0] = 0.7\n prob[-1] = 0.3\n np.testing.assert_almost_equal(prob_no_tower['minor'], prob)\n\n\n def test_write_output(self):\n pass\n\n\n\"\"\"\n def test_compute_damage_probability_simulation_alt(self):\n\n event_id = 'test2'\n scale = 2.5\n self.line1.event_tuple = (event_id, scale)\n\n seed = self.cfg.seed[event_id][self.line1.name]\n rnd_state = np.random.RandomState(seed)\n\n rv = rnd_state.uniform(size=(self.cfg.no_sims,\n len(self.line1.time)))\n\n # tf_ds = pd.Panel(np.zeros((self.cfg.no_sims,\n # len(self.line1.time),\n # self.line1.no_towers), dtype=bool),\n # items=range(self.cfg.no_sims),\n # major_axis=self.line1.time,\n # minor_axis=self.line1.name_by_line)\n\n tf_ds = np.zeros((self.line1.no_towers,\n self.cfg.no_sims,\n len(self.line1.time)), dtype=bool)\n\n for name, tower in self.line1.towers.items():\n tower.determine_damage_isolation_sim(rv)\n tower.determine_damage_adjacent_sim(seed)\n\n # print('{}'.format(name))\n # print('{}, {}'.format(tower.prob_damage_isolation['collapse'].max(),\n # tower.prob_damage_isolation['minor'].max()))\n # print ('{}'.format(tower.damage_sim['collapse'].head()))\n # print ('{}'.format(tower.damage_sim['minor'].head()))\n\n valid_ = tower.damage_sim['collapse'][\n tower.damage_sim['collapse'].id_adj.notnull()]\n\n # 
for key, grouped in tower.damage_sim['collapse'].groupby('id_time'):\n #\n # np.testing.assert_almost_equal(len(grouped)/float(self.cfg.no_sims),\n # tower.prob_damage_isolation.iloc[key, 'collapse'],\n # decimal=1)\n\n # for key, grouped in tower.damage_sim['collapse'].groupby('id_time'):\n\n for _, item in valid_.iterrows():\n # print('{}'.format(item))\n # print('{}:{}:{}'.format(item['id_sim'],\n # item['id_time'],\n # item['id_adj']))\n for idx in item['id_adj']:\n tf_ds[idx, item['id_sim'], item['id_time']] = True\n\n\"\"\"\n\n\nclass TestLine2(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n logging.basicConfig(level=logging.INFO)\n cls.logger = logging.getLogger(__name__)\n\n cls.cfg = Config(os.path.join(BASE_DIR, 'test.cfg'), logger=cls.logger)\n\n event_name = 'test2'\n event_scale = 1.0\n path_event = os.path.join(cls.cfg.path_wind_event_base,\n event_name)\n path_output = os.path.join(cls.cfg.path_output, 'test2_s1.0')\n # LineB\n dic_line = cls.cfg.lines['LineA'].copy()\n cls.no_sims = 10000\n dic_line.update({'name': 'LineA',\n 'no_sims': cls.no_sims,\n 'damage_states': cls.cfg.damage_states,\n 'non_collapse': cls.cfg.non_collapse,\n 'event_name': event_name,\n 'event_id': cls.cfg.event_id_format.format(event_name=event_name, scale=event_scale),\n 'rtol': cls.cfg.rtol,\n 'atol': cls.cfg.atol,\n 'dmg_threshold': cls.cfg.dmg_threshold,\n 'scale': event_scale,\n 'rnd_state': np.random.RandomState(0),\n 'path_event': path_event,\n 'path_output': path_output,\n 'dic_towers': cls.cfg.towers_by_line['LineA']})\n\n cls.line = Line(**dic_line)\n\n for _, tower in cls.line.towers.items():\n # tower._wind = create_wind_given_bearing(10.0, 1.0)\n tower.axisaz = 11.0\n tower._damage_prob = None\n tower._damage_prob_sim = None\n tower._dmg_sim = None\n tower._dmg_id_sim = None\n\n @classmethod\n def tearDown(cls):\n try:\n os.remove(cls.line.file_output)\n os.removedirs(cls.line.output_path)\n except:\n pass\n\n def test_compute_damage_per_line(self):\n with self.assertLogs('wistl', level='INFO') as cm:\n self.line.compute_damage_per_line(self.cfg)\n\n def test_logger_dmg_time_idx(self):\n\n with self.assertLogs('wistl.line', level='INFO') as cm:\n self.line.dmg_time_idx\n msg = f'Line:LineA sustains no damage'\n self.assertIn(f'INFO:wistl.line:{msg}', cm.output)\n\n\nclass TestLine3(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n cls.cfg = Config(os.path.join(BASE_DIR, 'test.cfg'))\n\n dic_line = cls.cfg.lines['LineA'].copy()\n cls.no_sims = 1000\n event_name = 'test2'\n event_scale = 2.0\n path_event = os.path.join(cls.cfg.path_wind_event_base,\n event_name)\n path_output = os.path.join(cls.cfg.path_output, 'test2_s2.0')\n dic_line.update({'name': 'LineA',\n 'no_sims': cls.no_sims,\n 'damage_states': cls.cfg.damage_states,\n 'non_collapse': cls.cfg.non_collapse,\n 'event_name': event_name,\n 'event_id': cls.cfg.event_id_format.format(event_name=event_name, scale=event_scale),\n 'rtol': 0.01,\n 'atol': 0.1,\n 'dmg_threshold': cls.cfg.dmg_threshold,\n 'scale': event_scale,\n 'rnd_state': np.random.RandomState(0),\n 'path_event': path_event,\n 'path_output': path_output,\n 'dic_towers': cls.cfg.towers_by_line['LineA']})\n\n cls.line = Line(**dic_line)\n\n @classmethod\n def tearDown(cls):\n try:\n os.remove(cls.line.file_output)\n os.removedirs(cls.line.path_output)\n except:\n pass\n\n def test_dmg_time_idx(self):\n #self.assertEqual(self.line.dmg_time_idx, (479, 481))\n self.assertEqual(self.line.dmg_time_idx, (476, 483))\n #for k, v in 
self.line.towers.items():\n # print(f'{k} -> {v.dmg_dmg_time_idx}')\n\n def test_dmg_towers(self):\n expected = set([16, 18, 19, 6, 7, 12, 2, 13, 14, 1, 10, 9, 20, 15, 4, 3])\n self.assertEqual(set(self.line.dmg_towers), expected)\n\n def test_compute_damage_per_line(self):\n with self.assertLogs('wistl', level='INFO') as cm:\n self.line.compute_damage_per_line(self.cfg)\n\n\nclass TestLine4(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n cls.cfg = Config(os.path.join(BASE_DIR, 'test_interaction.cfg'))\n\n dic_line = cls.cfg.lines['LineA'].copy()\n cls.no_sims = 10000\n event_name = 'test1'\n event_scale = 14.0\n path_event = os.path.join(cls.cfg.path_wind_event_base,\n event_name)\n path_output = os.path.join(cls.cfg.path_output, 'test1_s14.0')\n dic_line.update({'name': 'LineA',\n 'no_sims': cls.no_sims,\n 'damage_states': cls.cfg.damage_states,\n 'non_collapse': cls.cfg.non_collapse,\n 'event_name': event_name,\n 'event_id': cls.cfg.event_id_format.format(event_name=event_name, scale=event_scale),\n 'rtol': 0.01,\n 'atol': 0.1,\n 'dmg_threshold': cls.cfg.dmg_threshold,\n 'scale': event_scale,\n 'rnd_state': np.random.RandomState(0),\n 'path_event': path_event,\n 'path_output': path_output,\n 'dic_towers': cls.cfg.towers_by_line['LineA']})\n\n cls.line = Line(**dic_line)\n\n with cls.assertLogs('wistl', level='INFO') as cm:\n cls.line.compute_damage_prob()\n cls.line.compute_damage_prob_sim()\n\n def test_dmg_idx(self):\n\n self.assertEqual(set(self.line.dmg_towers), {16, 0})\n\n self.assertEqual(self.line.towers[0].dmg_time_idx, (0, 3))\n self.assertEqual(self.line.towers[0].dmg_idxmax, [2])\n self.assertEqual(self.line.towers[16].dmg_time_idx, (1, 3))\n self.assertEqual(self.line.towers[16].dmg_idxmax, [2])\n self.assertEqual(self.line.dmg_time_idx, (0, 3))\n\n df = pd.DataFrame(np.column_stack(self.line.dmg_idx['collapse']), columns=['idl', 'id_sim', 'id_time'])\n self.assertEqual(set(df['id_time'].unique()), {0, 1, 2})\n\n for idl in range(0, 3):\n self.assertTrue(set(df.loc[df['idl']==idl, 'id_time'].unique()).issubset({0, 1, 2}))\n\n for idl in range(14, 19):\n self.assertTrue(set(df.loc[df['idl']==idl, 'id_time'].unique()).issubset({0, 1, 2}))\n\n def test_dmg_idx_interaction(self):\n\n self.assertEqual(set(self.line.dmg_towers), {16, 0})\n\n self.assertEqual(self.line.towers[0].dmg_time_idx, (0, 3))\n self.assertEqual(self.line.towers[16].dmg_time_idx, (1, 3))\n\n self.assertEqual(set(self.line.towers[0].collapse_interaction['id_time'].unique()), {0, 1, 2})\n self.assertEqual(set(self.line.towers[16].collapse_interaction['id_time'].unique()), {0, 1})\n\n self.assertEqual(self.line.towers[0].target_line['LineB']['id'], 0)\n self.assertEqual(self.line.towers[16].target_line['LineB']['id'], 16)\n\n self.assertEqual(self.line.towers[0].target_line['LineC']['id'], 0)\n self.assertEqual(self.line.towers[16].target_line['LineC']['id'], 16)\n\n self.assertEqual(self.line.target_no_towers, {'LineB': 22, 'LineC': 22})\n\n # tower[0]: (0, C), (1, B), (2, B)\n df_count0 = self.line.towers[0].collapse_interaction.groupby('id_time').agg(len)\n # tower[16]: (0, C), (1, B)\n df_count1 = self.line.towers[16].collapse_interaction.groupby('id_time').agg(len)\n\n dfb = pd.DataFrame(self.line.dmg_idx_interaction['LineB'], columns=['idl', 'id_sim', 'id_time'])\n x = dfb.groupby(['idl' ,'id_time']).agg(len).reset_index()\n\n dfc = pd.DataFrame(self.line.dmg_idx_interaction['LineC'], columns=['idl', 'id_sim', 'id_time'])\n y = dfc.groupby(['idl' ,'id_time']).agg(len).reset_index()\n\n # 
tower0, B\n dt = self.line.towers[0].dmg_time_idx[0] - self.line.dmg_time_idx[0]\n for idt in [1, 2]:\n self.assertEqual(x.loc[(x.id_time == idt + dt) & (x.idl==0), 'id_sim'].values[0], df_count0.loc[idt, 'id_sim'])\n\n # tower0, C\n for idt in [0]:\n self.assertEqual(y.loc[(y.id_time == idt + dt) & (y.idl==0), 'id_sim'].values[0], df_count0.loc[idt, 'id_sim'])\n\n # tower16, B\n dt = self.line.towers[16].dmg_time_idx[0] - self.line.dmg_time_idx[0]\n for idt in [1]:\n self.assertEqual(x.loc[(x.id_time == idt + dt) & (x.idl==16), 'id_sim'].values[0], df_count1.loc[idt, 'id_sim'])\n\n # tower16, C\n for idt in [0]:\n self.assertEqual(y.loc[(y.id_time == idt + dt) & (y.idl==16), 'id_sim'].values[0], df_count1.loc[idt, 'id_sim'])\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.ones_like", "pandas.DataFrame", "numpy.testing.assert_almost_equal", "numpy.zeros_like", "numpy.testing.assert_allclose", "pandas.testing.assert_index_equal", "numpy.array", "numpy.zeros", "numpy.column_stack", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Nicholas-Chong/coronavirus_website
[ "b5ca436a01a447db4de9041442dbe39ca775b610" ]
[ "corona_website_app/management/commands/update_country_codes.py" ]
[ "from django.core.management.base import BaseCommand\nfrom corona_website_app.models import Country, Dates\nimport urllib.request\nimport pandas as pd\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs):\n data = urllib.request.urlretrieve('https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv')\n data = pd.read_csv(data[0])\n data = data.drop(columns=['alpha-2', 'country-code', 'iso_3166-2', 'region', 'sub-region', 'intermediate-region', 'region-code', 'sub-region-code', 'intermediate-region-code'])\n\n # print(data.query('name == \"Afghanistan\"'))\n country_list = data.name.to_list()\n\n for country in Country.objects.all():\n if country.name in country_list:\n query_string = 'name == \"' + country.name + '\"'\n code_df = data.query(query_string)\n code = code_df.iloc[0, 1]\n country.country_code = code\n country.save()" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
worldbank/GOST_PublicGoods
[ "119e1f6a4baf7aaffecd732e5f59882be23424d5" ]
[ "GOSTNets/GOSTNets/LoadOSM.py" ]
[ "####################################################################################################\n# Load OSM data into network graph\n# Benjamin Stewart and Charles Fox\n# Purpose: take an input dataset as a OSM file and return a network object\n####################################################################################################\n\nimport os, sys, time\n\nimport shapely.ops\n\nimport geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom osgeo import ogr\nfrom rtree import index\nfrom shapely import speedups\nfrom shapely.geometry import LineString, MultiLineString, MultiPoint, Point\nfrom geopy.distance import vincenty\nfrom boltons.iterutils import pairwise\nfrom shapely.wkt import loads,dumps\n\nclass OSM_to_network(object):\n\n def __init__(self, osmFile):\n ''' Generate a networkX object from a osm file\n '''\n self.osmFile = osmFile\n self.roads_raw = self.fetch_roads(osmFile)\n\n def generateRoadsGDF(self, in_df = None, outFile='', verbose = False):\n if type(in_df) != gpd.geodataframe.GeoDataFrame:\n in_df = self.roads_raw\n roads = self.get_all_intersections(in_df, verboseness = verbose)\n roads['key'] = ['edge_'+str(x+1) for x in range(len(roads))]\n np.arange(1,len(roads)+1,1)\n\n def get_nodes(x):\n return list(x.geometry.coords)[0],list(x.geometry.coords)[-1]\n\n nodes = gpd.GeoDataFrame(roads.apply(lambda x: get_nodes(x),axis=1).apply(pd.Series))\n nodes.columns = ['u','v']\n\n roads['length'] = roads.geometry.apply(lambda x : self.line_length(x))\n\n #G = ox.gdfs_to_graph(all_nodes,roads)\n roads.rename(columns={'geometry':'Wkt'}, inplace=True)\n\n roads = pd.concat([roads,nodes],axis=1)\n\n if outFile != '':\n roads.to_csv(outFile)\n\n self.roadsGPD = roads\n\n def filterRoads(self, acceptedRoads = ['primary','primary_link','secondary','secondary_link','motorway','motorway_link','trunk','trunk_link']):\n self.roads_raw = self.roads_raw.loc[self.roads_raw.infra_type.isin(acceptedRoads)]\n\n def fetch_roads(self, data_path):\n\n if data_path.split('.')[-1] == 'pbf':\n\n driver = ogr.GetDriverByName('OSM')\n data = driver.Open(data_path)\n\n sql_lyr = data.ExecuteSQL(\"SELECT osm_id,highway FROM lines WHERE highway IS NOT NULL\")\n\n roads=[]\n for feature in sql_lyr:\n if feature.GetField('highway') is not None:\n osm_id = feature.GetField('osm_id')\n shapely_geo = loads(feature.geometry().ExportToWkt())\n if shapely_geo is None:\n continue\n highway=feature.GetField('highway')\n roads.append([osm_id,highway,shapely_geo])\n\n if len(roads) > 0:\n road_gdf = gpd.GeoDataFrame(roads,columns=['osm_id','infra_type','geometry'],crs={'init': 'epsg:4326'})\n return road_gdf\n\n elif data_path.split('.')[-1] == 'shp':\n road_gdf = gpd.read_file(data_path)\n return road_gdf\n\n else:\n print('No roads found')\n\n def line_length(self, line, ellipsoid='WGS-84'):\n \"\"\"Length of a line in meters, given in geographic coordinates\n\n Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285\n\n Arguments:\n line {Shapely LineString} -- a shapely LineString object with WGS-84 coordinates\n ellipsoid {String} -- string name of an ellipsoid that `geopy` understands (see\n http://geopy.readthedocs.io/en/latest/#module-geopy.distance)\n\n Returns:\n Length of line in meters\n \"\"\"\n if line.geometryType() == 'MultiLineString':\n return sum(line_length(segment) for segment in line)\n\n return sum(\n 
vincenty(tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid).kilometers\n for a, b in pairwise(line.coords)\n )\n\n def get_all_intersections(self, shape_input, idx_osm = None, verboseness = False):\n # Initialize Rtree\n idx_inters = index.Index()\n # Load data\n #all_data = dict(zip(list(shape_input.osm_id),list(shape_input.geometry),list(shape_input.infra_type)))\n ### TODO - it shouldn't be necessary to reference the geometry column specifically\n # ... but here we are\n if idx_osm is None:\n idx_osm = shape_input['geometry'].sindex\n\n # Find all the intersecting lines to prepare for cutting\n count = 0\n tLength = shape_input.shape[0]\n inters_done = {}\n new_lines = []\n allCounts = []\n\n for idx, row in shape_input.iterrows():\n key1 = row.osm_id\n line = row.geometry\n infra_type = row.infra_type\n ### TIMING\n if count % 1000 == 0 and verboseness == True:\n print(\"Processing %s of %s\" % (count, tLength))\n count += 1\n #infra_line = line['infra_type']#shape_input.at[shape_input.index[shape_input['osm_id']==key1].tolist()[0],'infra_type']\n ### TIMING\n intersections = shape_input.iloc[list(idx_osm.intersection(line.bounds))]\n intersections = dict(zip(list(intersections.osm_id),list(intersections.geometry)))\n ### TIMING\n # Remove line1\n if key1 in intersections: intersections.pop(key1)\n # Find intersecting lines\n ### TIMING\n for key2,line2 in intersections.items():\n # Check that this intersection has not been recorded already\n if (key1, key2) in inters_done or (key2, key1) in inters_done:\n continue\n # Record that this intersection was saved\n inters_done[(key1, key2)] = True\n # Get intersection\n if line.intersects(line2):\n # Get intersection\n inter = line.intersection(line2)\n # Save intersecting point\n if \"Point\" == inter.type:\n idx_inters.insert(0, inter.bounds, inter)\n elif \"MultiPoint\" == inter.type:\n for pt in inter:\n idx_inters.insert(0, pt.bounds, pt)\n\n # cut lines where necessary and save all new linestrings to a list\n hits = [n.object for n in idx_inters.intersection(line.bounds, objects=True)]\n\n if len(hits) != 0:\n out = shapely.ops.split(line, MultiPoint(hits))\n new_lines.append([{'geometry': LineString(x), 'osm_id':key1,'infra_type':infra_type} for x in out.geoms])\n else:\n new_lines.append([{'geometry': line, 'osm_id':key1,\n 'infra_type':infra_type}])\n\n # Create one big list and treat all the cutted lines as unique lines\n flat_list = []\n all_data = {}\n\n #item for sublist in new_lines for item in sublist\n i = 1\n for sublist in new_lines:\n if sublist is not None:\n for item in sublist:\n item['id'] = i\n flat_list.append(item)\n i += 1\n all_data[i] = item\n\n # Transform into geodataframe and add coordinate system\n full_gpd = gpd.GeoDataFrame(flat_list,geometry ='geometry')\n full_gpd.crs = {'init' :'epsg:4326'}\n return(full_gpd)\n\n def initialReadIn(self, fpath=None):\n if isinstance(fpath, str):\n edges_1 = pd.read_csv(fpath)\n edges_1 = edges_1['Wkt'].apply(lambda x: loads(x))\n elif isinstance(fpath, gpd.GeoDataFrame):\n edges_1 = fpath\n else:\n edges_1 = self.roadsGPD\n edges = edges_1.copy()\n node_bunch = list(set(list(edges['u']) + list(edges['v'])))\n def convert(x):\n u = x.u\n v = x.v\n data = {'Wkt':x.Wkt,\n 'id':x.id,\n 'infra_type':x.infra_type,\n 'osm_id':x.osm_id,\n 'key': x.key,\n 'length':x.length}\n return (u, v, data)\n\n edge_bunch = edges.apply(lambda x: convert(x), axis = 1).tolist()\n G = nx.MultiDiGraph()\n G.add_nodes_from(node_bunch)\n G.add_edges_from(edge_bunch)\n for u, data in 
G.nodes(data = True):\n if type(u) == str:\n q = tuple(float(x) for x in u[1:-1].split(','))\n if type(u) == tuple:\n q = u\n data['x'] = q[0]\n data['y'] = q[1]\n G = nx.convert_node_labels_to_integers(G)\n self.network = G\n return G\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
om-henners/advent_of_code
[ "2c11272e05d7d1dcc5a96c9026d0f799f6443fa7" ]
[ "2018/day3/solution2.py" ]
[ "\"\"\"\nAmidst the chaos, you notice that exactly one claim doesn't overlap by even a\nsingle square inch of fabric with any other claim. If you can somehow draw\nattention to it, maybe the Elves will be able to make Santa's suit after all!\n\nFor example, in the claims above, only claim 3 is intact after all claims are\nmade.\n\nWhat is the ID of the only claim that doesn't overlap?\n\"\"\"\nimport numpy as np\nimport parse\n\nclaim_matcher = '''#{id:d} @ {x:d},{y:d}: {width:d}x{height:d}\\n'''\nfabric = np.zeros((1000, 1000), dtype=np.int)\n\n\nfor line in open('input.txt'):\n r = parse.parse(claim_matcher, line)\n claim = fabric[r['y']: r['y'] + r['height'], r['x']: r['x'] + r['width']]\n claim[:] = claim + 1\n\n\nfor line in open('input.txt'):\n r = parse.parse(claim_matcher, line)\n claim = fabric[r['y']: r['y'] + r['height'], r['x']: r['x'] + r['width']]\n\n if claim.max() == 1:\n print(r['id'])\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kubapok/human-motion-classification
[ "0979a66d8f554d3c929e2548b5415fa3cfe83256" ]
[ "classify_by_fuzzy_logic.py" ]
[ "import numpy as np\nimport cv2\nimport sys\nfrom ROI import ROI\nfrom fuzzy_classifier import FuzzyClassifier\n\ncap = cv2.VideoCapture(sys.argv[1])\n\nfgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n\nclassifier = FuzzyClassifier()\nclassifier.plot_variables()\n\ni = 0\nret, frame = cap.read()\nprvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\nVIDEO_WIDTH, VIDEO_HEIGHT = prvs.shape\nROIframe = np.zeros((VIDEO_WIDTH, VIDEO_HEIGHT), dtype=np.uint8)\nmask = np.zeros((VIDEO_WIDTH, VIDEO_HEIGHT), dtype=np.uint8)\nlindex, rindex, uindex, dindex, = 0, 10, 0, 10\ni = 0\nwhile(1):\n ret, frame = cap.read()\n\n fgmask = fgbg.apply(frame)\n if np.count_nonzero(fgmask) > VIDEO_WIDTH * VIDEO_HEIGHT / 3100: # 3100 is just parm tuned by experiments\n lindex, rindex, dindex, uindex = ROI.get_POI_corners(fgmask, 0.02)\n if lindex and rindex and dindex and uindex:\n mask = np.zeros((VIDEO_WIDTH, VIDEO_HEIGHT), dtype=np.uint8)\n mask[dindex: uindex, lindex:rindex] = 255\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ROIframe_gray = cv2.bitwise_and(frame_gray, frame_gray, mask=mask)\n flow = cv2.calcOpticalFlowFarneback(\n prvs, ROIframe_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n ROIonly_flow_gray = flow[dindex: uindex, lindex:rindex]\n vertical_movement, horizontal_movement = cv2.split(\n ROIonly_flow_gray)\n\n #***\n height = uindex - dindex\n width = rindex - lindex\n data = (vertical_movement.sum(),\n horizontal_movement.sum(), height, width)\n motion = classifier.classify(data)\n print(vertical_movement.sum(), '\\t', horizontal_movement.sum(),\n '\\t', height, '\\t', width, '\\t', motion)\n\n prvs = ROIframe_gray\n i += 1\n cv2.line(frame, (400, 100), (400 + int(vertical_movement.sum() / 1000),\n 100 + int(horizontal_movement.sum() / 1000)), (255, 0, 0), 5)\n cv2.line(frame, (400, 100), (400, 100), (0, 0, 255), 5)\n if i == 60:\n pass\n cv2.rectangle(frame, (lindex, dindex),\n (rindex, uindex), (0, 255, 0), 3)\n\n cv2.imshow('frame', frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.zeros", "numpy.count_nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdrees/hvplot
[ "0a78246eecc826d4061b9462831763db39111ccd" ]
[ "hvplot/tests/testoptions.py" ]
[ "import hvplot\nimport holoviews as hv\nimport numpy as np\nimport pytest\n\nfrom holoviews import Store\nfrom holoviews.core.options import Options, OptionTree\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\ntry:\n import xarray as xr\nexcept ImportError:\n xr = None\n\n\[email protected](scope='class')\ndef load_pandas_accessor():\n import hvplot.pandas # noqa\n\n\[email protected](scope='class')\ndef load_xarray_accessor():\n import hvplot.xarray # noqa\n\n\[email protected](params=['bokeh', 'matplotlib', 'plotly'], scope='class')\ndef backend(request):\n backend = request.param\n backend_copy = Store.current_backend\n if backend not in Store.registry:\n hvplot.extension(backend, compatibility='bokeh')\n Store.set_current_backend(backend)\n store_copy = OptionTree(sorted(Store.options().items()),\n groups=Options._option_groups)\n yield backend\n Store.options(val=store_copy)\n Store._custom_options = {k:{} for k in Store._custom_options.keys()}\n Store.set_current_backend(backend_copy)\n\n\[email protected](scope='module')\ndef df():\n return pd.DataFrame([[1, 2, 'A', 0.1], [3, 4, 'B', 0.2], [5, 6, 'C', 0.3]],\n columns=['x', 'y', 'category', 'number'])\n\n\[email protected](scope='module')\ndef symmetric_df():\n return pd.DataFrame([[1, 2, -1], [3, 4, 0], [5, 6, 1]],\n columns=['x', 'y', 'number'])\n\n\[email protected](pd is None, reason='Pandas not available')\[email protected]('load_pandas_accessor')\nclass TestOptions:\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='legend_position not supported w/ matplotlib for scatter')),\n pytest.param('plotly', marks=pytest.mark.xfail(reason='legend_position not supported w/ plotly for scatter')),\n ],\n indirect=True\n )\n def test_scatter_legend_position(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category', legend='left')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['legend_position'] == 'left'\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n 'matplotlib',\n pytest.param('plotly', marks=pytest.mark.xfail(reason='legend_position not supported w/ plotly for hist')),\n ],\n indirect=True\n )\n def test_histogram_by_category_legend_position(self, df, backend):\n plot = df.hvplot.hist('y', by='category', legend='left')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['legend_position'] == 'left'\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_logz(self, df, kind, backend):\n plot = df.hvplot('x', 'y', c='x', logz=True, kind=kind)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['logz'] is True\n\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_color_dim(self, df, kind, backend):\n plot = df.hvplot('x', 'y', c='number', kind=kind)\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['color'] == 'number'\n assert 'number' in plot.vdims\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_size_dim(self, df, kind, backend):\n plot = df.hvplot('x', 'y', s='number', kind=kind)\n opts = Store.lookup_options(backend, plot, 'style')\n if backend in ['bokeh', 'plotly']:\n param = 'size'\n elif backend == 'matplotlib':\n param = 's'\n assert opts.kwargs[param] == 'number'\n assert 'number' in plot.vdims\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='cannot map a dim to alpha w/ matplotlib')),\n 
pytest.param('plotly', marks=pytest.mark.xfail(reason='cannot map a dim to alpha w/ plotly')),\n ],\n indirect=True\n )\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_alpha_dim(self, df, kind, backend):\n plot = df.hvplot('x', 'y', alpha='number', kind=kind)\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['alpha'] == 'number'\n assert 'number' in plot.vdims\n # Special matplotlib code to trigger an error that happens on render\n if backend == 'matplotlib':\n mpl_renderer = hv.Store.renderers['matplotlib']\n mpl_renderer.get_plot(plot)\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_marker_dim(self, df, kind, backend):\n plot = df.hvplot('x', 'y', marker='category', kind=kind)\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['marker'] == 'category'\n assert 'category' in plot.vdims\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_color_dim_overlay(self, df, kind, backend):\n plot = df.hvplot('x', 'y', c='number', by='category', kind=kind)\n opts = Store.lookup_options(backend, plot.last, 'style')\n assert opts.kwargs['color'] == 'number'\n assert 'number' in plot.last.vdims\n\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_size_dim_overlay(self, df, kind, backend):\n plot = df.hvplot('x', 'y', s='number', by='category', kind=kind)\n opts = Store.lookup_options(backend, plot.last, 'style')\n if backend in ['bokeh', 'plotly']:\n param = 'size'\n elif backend == 'matplotlib':\n param = 's'\n assert opts.kwargs[param] == 'number'\n assert 'number' in plot.last.vdims\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n 'matplotlib',\n pytest.param('plotly', marks=pytest.mark.xfail(reason='cannot map a dim to alpha w/ plotly')),\n ],\n indirect=True\n )\n @pytest.mark.parametrize('kind', ['scatter', 'points'])\n def test_alpha_dim_overlay(self, df, kind, backend):\n plot = df.hvplot('x', 'y', alpha='number', by='category', kind=kind)\n opts = Store.lookup_options(backend, plot.last, 'style')\n assert opts.kwargs['alpha'] == 'number'\n assert 'number' in plot.last.vdims\n\n def test_hvplot_defaults(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n if backend == 'bokeh':\n assert opts.kwargs['height'] == 300\n assert opts.kwargs['width'] == 700\n elif backend == 'matplotlib':\n assert opts.kwargs['aspect'] == pytest.approx(2.333333)\n assert opts.kwargs['fig_size'] == pytest.approx(233.333333)\n if backend == 'bokeh':\n assert opts.kwargs['responsive'] is False\n assert opts.kwargs['shared_axes'] is True\n # legend_position shouldn't only be for Bokeh\n assert opts.kwargs['legend_position'] == 'right'\n assert opts.kwargs['show_grid'] is False\n assert opts.kwargs['show_legend'] is True\n assert opts.kwargs['logx'] is False\n assert opts.kwargs['logy'] is False\n assert opts.kwargs.get('logz') is None\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='default opts not supported w/ matplotlib')),\n pytest.param('plotly', marks=pytest.mark.xfail(reason='default opts not supported w/ plotly')),\n ],\n indirect=True\n )\n def test_holoviews_defined_default_opts(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(height=400, width=900, show_grid=True))\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n # legend_position shouldn't apply only to bokeh\n if 
backend == 'bokeh':\n assert opts.kwargs['legend_position'] == 'right'\n assert opts.kwargs['show_grid'] is True\n assert opts.kwargs['height'] == 400\n assert opts.kwargs['width'] == 900\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='default opts not supported w/ matplotlib')),\n pytest.param('plotly', marks=pytest.mark.xfail(reason='default opts not supported w/ plotly')),\n ],\n indirect=True\n )\n def test_holoviews_defined_default_opts_overwritten_in_call(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(height=400, width=900, show_grid=True))\n plot = df.hvplot.scatter('x', 'y', c='category', width=300, legend='left')\n opts = Store.lookup_options(backend, plot, 'plot')\n # legend_position shouldn't apply only to bokeh\n if backend == 'bokeh':\n assert opts.kwargs['legend_position'] == 'left'\n assert opts.kwargs['show_grid'] is True\n assert opts.kwargs['height'] == 400\n assert opts.kwargs['width'] == 300\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='default opts not supported not supported w/ matplotlib')),\n pytest.param('plotly', marks=pytest.mark.xfail(reason='default opts not supported not supported w/ plotly')),\n ],\n indirect=True\n )\n def test_holoviews_defined_default_opts_are_not_mutable(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(tools=['tap']))\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['tools'] == ['tap', 'hover']\n default_opts = Store.options(backend=backend)['Scatter'].groups['plot'].options\n assert default_opts['tools'] == ['tap']\n\n def test_axis_set_to_visible_by_default(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert 'xaxis' not in opts.kwargs\n assert 'yaxis' not in opts.kwargs\n\n def test_axis_set_to_none(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category', xaxis=None, yaxis=None)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['xaxis'] is None\n assert opts.kwargs['yaxis'] is None\n\n def test_axis_set_to_false(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category', xaxis=False, yaxis=False)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['xaxis'] is None\n assert opts.kwargs['yaxis'] is None\n\n def test_axis_set_to_none_in_holoviews_opts_default(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(xaxis=None, yaxis=None))\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['xaxis'] is None\n assert opts.kwargs['yaxis'] is None\n\n @pytest.mark.xfail\n def test_axis_set_to_none_in_holoviews_opts_default_overwrite_in_call(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(xaxis=None, yaxis=None))\n plot = df.hvplot.scatter('x', 'y', c='category', xaxis=True, yaxis=True)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert 'xaxis' not in opts.kwargs\n assert 'yaxis' not in opts.kwargs\n\n def test_loglog_opts(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category', loglog=True)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['logx'] is True\n assert opts.kwargs['logy'] is True\n assert opts.kwargs.get('logz') is None\n\n def test_logy_opts(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='category', logy=True)\n 
opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['logx'] is False\n assert opts.kwargs['logy'] is True\n assert opts.kwargs.get('logz') is None\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n pytest.param('matplotlib', marks=pytest.mark.xfail(reason='default opts not supported w/ matplotlib')),\n pytest.param('plotly', marks=pytest.mark.xfail(reason='defaykt opts not supported w/ plotly')),\n ],\n indirect=True\n )\n def test_holoviews_defined_default_opts_logx(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(logx=True))\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['logx'] is True\n assert opts.kwargs['logy'] is False\n assert opts.kwargs.get('logz') is None\n\n def test_holoviews_defined_default_opts_logx_overwritten_in_call(self, df, backend):\n hv.opts.defaults(hv.opts.Scatter(logx=True))\n plot = df.hvplot.scatter('x', 'y', c='category', logx=False)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['logx'] is False\n assert opts.kwargs['logy'] is False\n assert opts.kwargs.get('logz') is None\n\n def test_hvplot_default_cat_cmap_opts(self, df, backend):\n import colorcet as cc\n plot = df.hvplot.scatter('x', 'y', c='category')\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == cc.palette['glasbey_category10']\n\n def test_hvplot_default_num_cmap_opts(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number')\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == 'kbc_r'\n\n def test_cmap_opts_by_type(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number', cmap='diverging')\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == 'coolwarm'\n\n def test_cmap_opts_by_name(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number', cmap='fire')\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == 'fire'\n\n def test_colormap_opts_by_name(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number', colormap='fire')\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == 'fire'\n\n def test_cmap_opts_as_a_list(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number', cmap=['red', 'blue', 'green'])\n opts = Store.lookup_options(backend, plot, 'style')\n assert opts.kwargs['cmap'] == ['red', 'blue', 'green']\n\n @pytest.mark.parametrize(\n ('opt', 'backend'),\n [\n ('aspect', 'bokeh'),\n ('aspect', 'matplotlib'),\n ('aspect', 'plotly'),\n ('data_aspect', 'bokeh'),\n ('data_aspect', 'matplotlib'),\n pytest.param(\n 'data_aspect', 'plotly',\n marks=pytest.mark.xfail(reason='data_aspect not supported w/ plotly')\n ),\n ],\n indirect=['backend']\n )\n def test_aspect(self, df, opt, backend):\n plot = df.hvplot(x='x', y='y', **{opt: 2})\n opts = Store.lookup_options(backend, plot, 'plot').kwargs\n assert opts[opt] == 2\n if backend in ['bokeh', 'matplotlib']:\n assert opts.get('width') is None\n assert opts.get('height') is None\n elif backend == 'matplotlib':\n assert opts.get('fig_size') is None\n\n @pytest.mark.parametrize(\n ('opt', 'backend'),\n [\n ('aspect', 'bokeh'),\n ('aspect', 'matplotlib'),\n ('aspect', 'plotly'),\n ('data_aspect', 'bokeh'),\n ('data_aspect', 'matplotlib'),\n pytest.param(\n 'data_aspect', 'plotly',\n marks=pytest.mark.xfail(reason='data_aspect not supported w/ plotly')\n ),\n ],\n indirect=['backend']\n )\n def 
test_aspect_and_width(self, df, opt, backend):\n plot = df.hvplot(x='x', y='y', width=150, **{opt: 2})\n opts = hv.Store.lookup_options(backend, plot, 'plot').kwargs\n assert opts[opt] == 2\n if backend in ['bokeh', 'plotly']:\n assert opts.get('width') == 150\n assert opts.get('height') is None\n elif backend == 'matplotlib':\n assert opts.get('fig_size') == pytest.approx(50.0)\n\n def test_symmetric_dataframe(self, backend):\n import pandas as pd\n df = pd.DataFrame([[1, 2, -1], [3, 4, 0], [5, 6, 1]],\n columns=['x', 'y', 'number'])\n plot = df.hvplot.scatter('x', 'y', c='number')\n plot_opts = Store.lookup_options(backend, plot, 'plot')\n assert plot_opts.kwargs['symmetric'] is True\n style_opts = Store.lookup_options(backend, plot, 'style')\n assert style_opts.kwargs['cmap'] == 'coolwarm'\n\n def test_symmetric_is_deduced_dataframe(self, symmetric_df, backend):\n plot = symmetric_df.hvplot.scatter('x', 'y', c='number')\n plot_opts = Store.lookup_options(backend, plot, 'plot')\n assert plot_opts.kwargs['symmetric'] is True\n style_opts = Store.lookup_options(backend, plot, 'style')\n assert style_opts.kwargs['cmap'] == 'coolwarm'\n\n def test_symmetric_from_opts(self, df, backend):\n plot = df.hvplot.scatter('x', 'y', c='number', symmetric=True)\n plot_opts = Store.lookup_options(backend, plot, 'plot')\n assert plot_opts.kwargs['symmetric'] is True\n style_opts = Store.lookup_options(backend, plot, 'style')\n assert style_opts.kwargs['cmap'] == 'coolwarm'\n\n def test_symmetric_from_opts_does_not_deduce(self, symmetric_df, backend):\n plot = symmetric_df.hvplot.scatter('x', 'y', c='number', symmetric=False)\n plot_opts = Store.lookup_options(backend, plot, 'plot')\n assert plot_opts.kwargs['symmetric'] is False\n style_opts = Store.lookup_options(backend, plot, 'style')\n assert style_opts.kwargs['cmap'] == 'kbc_r'\n\n def test_if_clim_is_set_symmetric_is_not_deduced(self, symmetric_df, backend):\n plot = symmetric_df.hvplot.scatter('x', 'y', c='number', clim=(-1,1))\n plot_opts = Store.lookup_options(backend, plot, 'plot')\n assert plot_opts.kwargs.get('symmetric') is None\n style_opts = Store.lookup_options(backend, plot, 'style')\n assert style_opts.kwargs['cmap'] == 'kbc_r'\n\n @pytest.mark.parametrize(\n 'backend',\n [\n 'bokeh',\n 'matplotlib',\n pytest.param(\n 'plotly',\n marks=pytest.mark.xfail(\n reason='bandwidth, cut, levels not supported w/ plotly for bivariate'\n )\n ),\n ],\n indirect=True\n )\n def test_bivariate_opts(self, df, backend):\n plot = df.hvplot.bivariate('x', 'y', bandwidth=0.2, cut=1, levels=5, filled=True)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['bandwidth'] == 0.2\n assert opts.kwargs['cut'] == 1\n assert opts.kwargs['levels'] == 5\n assert opts.kwargs['filled'] is True\n\n def test_kde_opts(self, df, backend):\n plot = df.hvplot.kde('x', bandwidth=0.2, cut=1, filled=True)\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['bandwidth'] == 0.2\n assert opts.kwargs['cut'] == 1\n assert opts.kwargs['filled'] is True\n\n\[email protected](scope='module')\ndef da():\n return xr.DataArray(\n data=np.arange(16).reshape((2, 2, 2, 2)),\n coords={'time': [0, 1], 'y': [0, 1], 'x': [0, 1], 'band': [0, 1]},\n dims=['time', 'y', 'x', 'band'],\n name='test',\n )\n\n\[email protected](scope='module')\ndef da2():\n return xr.DataArray(\n data=np.arange(27).reshape((3, 3, 3)),\n coords={'y': [0, 1, 2], 'x': [0, 1, 2]},\n dims=['y', 'x', 'other'],\n name='test2'\n )\n\n\[email protected](scope='module')\ndef ds1(da):\n 
return xr.Dataset(dict(foo=da))\n\n\[email protected](scope='module')\ndef ds2(da, da2):\n return xr.Dataset(dict(foo=da, bar=da2))\n\n\[email protected](xr is None, reason='Xarray not available')\[email protected]('load_xarray_accessor')\nclass TestXarrayTitle:\n\n def test_dataarray_2d_with_title(self, da, backend):\n da_sel = da.sel(time=0, band=0)\n plot = da_sel.hvplot() # Image plot\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'time = 0, band = 0'\n\n def test_dataarray_1d_with_title(self, da, backend):\n da_sel = da.sel(time=0, band=0, x=0)\n plot = da_sel.hvplot() # Line plot\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'time = 0, x = 0, band = 0'\n\n def test_dataarray_1d_and_by_with_title(self, da, backend):\n da_sel = da.sel(time=0, band=0, x=[0, 1])\n plot = da_sel.hvplot(by='x') # Line plot with hue/by\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'time = 0, band = 0'\n\n def test_override_title(self, da, backend):\n da_sel = da.sel(time=0, band=0)\n plot = da_sel.hvplot(title='title') # Image plot\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'title'\n\n def test_dataarray_4d_line_no_title(self, da, backend):\n plot = da.hvplot.line(dynamic=False) # Line plot with widgets\n opts = Store.lookup_options(backend, plot.last, 'plot')\n assert 'title' not in opts.kwargs\n\n def test_dataarray_3d_histogram_with_title(self, da, backend):\n da_sel = da.sel(time=0)\n plot = da_sel.hvplot() # Histogram and no widgets\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'time = 0'\n\n def test_dataset_empty_raises(self, ds1, backend):\n with pytest.raises(ValueError, match='empty xarray.Dataset'):\n ds1.drop_vars('foo').hvplot()\n\n def test_dataset_one_var_behaves_like_dataarray(self, ds1, backend):\n ds_sel = ds1.sel(time=0, band=0)\n plot = ds_sel.hvplot() # Image plot\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'time = 0, band = 0'\n\n def test_dataset_scatter_with_title(self, ds2, backend):\n ds_sel = ds2.sel(time=0, band=0, x=0, y=0)\n plot = ds_sel.hvplot.scatter(x='foo', y='bar') # Image plot\n opts = Store.lookup_options(backend, plot, 'plot')\n assert opts.kwargs['title'] == 'y = 0, x = 0, time = 0, band = 0'\n" ]
[ [ "numpy.arange", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jacobmas/btrdb-python
[ "a5a897c3822668c84a456677479a81838e729e15" ]
[ "btrdb/transformers.py" ]
[ "# btrdb.transformers\n# Value transformation utilities\n#\n# Author: PingThings\n# Created: Fri Dec 21 14:57:30 2018 -0500\n#\n# For license information, see LICENSE.txt\n# ID: transformers.py [] [email protected] $\n\n\"\"\"\nValue transformation utilities\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport csv\nimport contextlib\nfrom collections import OrderedDict\nfrom warnings import warn\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n_STAT_PROPERTIES = ('min', 'mean', 'max', 'count', 'stddev')\n\ndef _get_time_from_row(row):\n for item in row:\n if item: return item.time\n raise Exception(\"Row contains no data\")\n\n\ndef _stream_names(streamset, func):\n \"\"\"\n private convenience function to come up with proper final stream names\n before sending a collection of streams (dataframe, etc.) back to the\n user.\n \"\"\"\n return tuple(\n func(s) for s in streamset._streams\n )\n\n\n##########################################################################\n## Transform Functions\n##########################################################################\n\ndef to_series(streamset, datetime64_index=True, agg=\"mean\", name_callable=None):\n \"\"\"\n Returns a list of Pandas Series objects indexed by time\n\n Parameters\n ----------\n datetime64_index: bool\n Directs function to convert Series index to np.datetime64[ns] or\n leave as np.int64.\n\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to create the Series\n from. Must be one of \"min\", \"mean\", \"max\", \"count\", or \"stddev\". This\n argument is ignored if RawPoint values are passed into the function.\n\n name_callable : lambda, default: lambda s: s.collection + \"/\" + s.name\n Sprecify a callable that can be used to determine the series name given a\n Stream object.\n\n \"\"\"\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"Please install Pandas to use this transformation function.\")\n\n # TODO: allow this at some future point\n if agg == \"all\":\n raise AttributeError(\"cannot use 'all' as aggregate at this time\")\n\n if not callable(name_callable):\n name_callable = lambda s: s.collection + \"/\" + s.name\n\n\n result = []\n stream_names = _stream_names(streamset, name_callable)\n\n for idx, output in enumerate(streamset.values()):\n times, values = [], []\n for point in output:\n times.append(point.time)\n if point.__class__.__name__ == \"RawPoint\":\n values.append(point.value)\n else:\n values.append(getattr(point, agg))\n\n if datetime64_index:\n times = pd.Index(times, dtype='datetime64[ns]')\n\n result.append(pd.Series(\n data=values, index=times, name=stream_names[idx]\n ))\n return result\n\n\ndef to_dataframe(streamset, columns=None, agg=\"mean\", name_callable=None):\n \"\"\"\n Returns a Pandas DataFrame object indexed by time and using the values of a\n stream for each column.\n\n Parameters\n ----------\n columns: sequence\n column names to use for DataFrame. Deprecated and not compatible with name_callable.\n\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to create the Series\n from. Must be one of \"min\", \"mean\", \"max\", \"count\", \"stddev\", or \"all\". 
This\n argument is ignored if not using StatPoints.\n\n name_callable : lambda, default: lambda s: s.collection + \"/\" + s.name\n Sprecify a callable that can be used to determine the series name given a\n Stream object. This is not compatible with agg == \"all\" at this time\n\n\n \"\"\"\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"Please install Pandas to use this transformation function.\")\n\n # deprecation warning added in v5.8\n if columns:\n warn(\"the columns argument is deprecated and will be removed in a future release\", DeprecationWarning, stacklevel=2)\n\n # TODO: allow this at some future point\n if agg == \"all\" and name_callable is not None:\n raise AttributeError(\"cannot provide name_callable when using 'all' as aggregate at this time\")\n\n # do not allow agg=\"all\" with RawPoints\n if agg == \"all\" and streamset.allow_window:\n agg=\"\"\n\n # default arg values\n if not callable(name_callable):\n name_callable = lambda s: s.collection + \"/\" + s.name\n\n\n df = pd.DataFrame(to_dict(streamset,agg=agg))\n\n if not df.empty:\n df = df.set_index(\"time\")\n\n if agg == \"all\" and not streamset.allow_window:\n stream_names = [[s.collection, s.name, prop] for s in streamset._streams for prop in _STAT_PROPERTIES]\n df.columns=pd.MultiIndex.from_tuples(stream_names)\n else:\n df.columns = columns if columns else _stream_names(streamset, name_callable)\n\n return df\n\n\ndef to_array(streamset, agg=\"mean\"):\n \"\"\"\n Returns a multidimensional numpy array (similar to a list of lists) containing point\n classes.\n\n Parameters\n ----------\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to return for the\n arrays. Must be one of \"min\", \"mean\", \"max\", \"count\", or \"stddev\". This\n argument is ignored if RawPoint values are passed into the function.\n\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"Please install Numpy to use this transformation function.\")\n\n # TODO: allow this at some future point\n if agg == \"all\":\n raise AttributeError(\"cannot use 'all' as aggregate at this time\")\n\n results = []\n for points in streamset.values():\n segment = []\n for point in points:\n if point.__class__.__name__ == \"RawPoint\":\n segment.append(point.value)\n else:\n segment.append(getattr(point, agg))\n results.append(segment)\n return np.array(results)\n\n\ndef to_dict(streamset, agg=\"mean\", name_callable=None):\n \"\"\"\n Returns a list of OrderedDict for each time code with the appropriate\n stream data attached.\n\n Parameters\n ----------\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to constrain dict\n keys. Must be one of \"min\", \"mean\", \"max\", \"count\", or \"stddev\". 
This\n argument is ignored if RawPoint values are passed into the function.\n\n name_callable : lambda, default: lambda s: s.collection + \"/\" + s.name\n Sprecify a callable that can be used to determine the series name given a\n Stream object.\n\n \"\"\"\n if not callable(name_callable):\n name_callable = lambda s: s.collection + \"/\" + s.name\n\n data = []\n stream_names = _stream_names(streamset, name_callable)\n\n for row in streamset.rows():\n item = OrderedDict({\n \"time\": _get_time_from_row(row),\n })\n for idx, col in enumerate(stream_names):\n if row[idx].__class__.__name__ == \"RawPoint\":\n item[col] = row[idx].value if row[idx] else None\n else:\n if agg == \"all\":\n for stat in _STAT_PROPERTIES:\n item[\"{}-{}\".format(col, stat)] = getattr(row[idx], stat) if row[idx] else None\n else:\n item[col] = getattr(row[idx], agg) if row[idx] else None\n data.append(item)\n return data\n\n\ndef to_csv(streamset, fobj, dialect=None, fieldnames=None, agg=\"mean\", name_callable=None):\n \"\"\"\n Saves stream data as a CSV file.\n\n Parameters\n ----------\n fobj: str or file-like object\n Path to use for saving CSV file or a file-like object to use to write to.\n\n dialect: csv.Dialect\n CSV dialect object from Python csv module. See Python's csv module for\n more information.\n\n fieldnames: sequence\n A sequence of strings to use as fieldnames in the CSV header. See\n Python's csv module for more information.\n\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to return when\n limiting results. Must be one of \"min\", \"mean\", \"max\", \"count\", or \"stddev\".\n This argument is ignored if RawPoint values are passed into the function.\n\n name_callable : lambda, default: lambda s: s.collection + \"/\" + s.name\n Sprecify a callable that can be used to determine the series name given a\n Stream object.\n \"\"\"\n\n # TODO: allow this at some future point\n if agg == \"all\":\n raise AttributeError(\"cannot use 'all' as aggregate at this time\")\n\n if not callable(name_callable):\n name_callable = lambda s: s.collection + \"/\" + s.name\n\n @contextlib.contextmanager\n def open_path_or_file(path_or_file):\n if isinstance(path_or_file, str):\n f = file_to_close = open(path_or_file, 'w', newline='')\n else:\n f = path_or_file\n file_to_close = None\n try:\n yield f\n finally:\n if file_to_close:\n file_to_close.close()\n\n with open_path_or_file(fobj) as csvfile:\n stream_names = _stream_names(streamset, name_callable)\n fieldnames = fieldnames if fieldnames else [\"time\"] + list(stream_names)\n\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect=dialect)\n writer.writeheader()\n\n for item in to_dict(streamset, agg=agg):\n writer.writerow(item)\n\n\ndef to_table(streamset, agg=\"mean\", name_callable=None):\n \"\"\"\n Returns string representation of the data in tabular form using the tabulate\n library.\n\n Parameters\n ----------\n agg : str, default: \"mean\"\n Specify the StatPoint field (e.g. aggregating function) to create the Series\n from. Must be one of \"min\", \"mean\", \"max\", \"count\", or \"stddev\". 
This\n argument is ignored if RawPoint values are passed into the function.\n\n name_callable : lambda, default: lambda s: s.collection + \"/\" + s.name\n Sprecify a callable that can be used to determine the column name given a\n Stream object.\n\n \"\"\"\n try:\n from tabulate import tabulate\n except ImportError:\n raise ImportError(\"Please install tabulate to use this transformation function.\")\n\n # TODO: allow this at some future point\n if agg == \"all\":\n raise AttributeError(\"cannot use 'all' as aggregate at this time\")\n\n if not callable(name_callable):\n name_callable = lambda s: s.collection + \"/\" + s.name\n\n return tabulate(streamset.to_dict(agg=agg, name_callable=name_callable), headers=\"keys\")\n\n\n##########################################################################\n## Transform Classes\n##########################################################################\n\nclass StreamSetTransformer(object):\n \"\"\"\n Base class for StreamSet or Stream transformations\n \"\"\"\n to_dict = to_dict\n to_array = to_array\n to_series = to_series\n to_dataframe = to_dataframe\n\n to_csv = to_csv\n to_table = to_table\n" ]
[ [ "pandas.MultiIndex.from_tuples", "numpy.array", "pandas.Index", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
kuc2477/tensorflow-infogan
[ "fde80e9f7b6c55548e8879090868c3d020ecba49" ]
[ "train.py" ]
[ "import tensorflow as tf\nfrom tqdm import tqdm\nimport utils\nfrom data import DATASETS, DATASET_LENGTH_GETTERS\n\n\ndef train(model, config, session=None):\n # define session if needed.\n session = session or tf.Session()\n\n # define summaries.\n summary_writer = tf.summary.FileWriter(config.log_dir, session.graph)\n image_summary = tf.summary.image(\n 'generated images', model.g, max_outputs=8\n )\n statistics_summaries = tf.summary.merge([\n tf.summary.scalar('discriminator loss', model.d_loss),\n tf.summary.scalar(\n 'estimated mutual information between c and c|g',\n model.estimated_mutual_information_between_c_and_c_given_g\n ),\n tf.summary.histogram(\n 'estimated parameters of distribution c|g',\n model.estimated_parameters_of_distribution_c_given_g\n )\n ])\n\n # define optimizers\n d_trainer = tf.train.AdamOptimizer(\n learning_rate=config.learning_rate,\n beta1=config.beta1\n )\n g_trainer = tf.train.AdamOptimizer(\n learning_rate=config.learning_rate,\n beta1=config.beta1,\n )\n\n # define parameter update tasks\n d_grads = d_trainer.compute_gradients(model.d_loss, var_list=(\n model.d_vars + model.q_vars\n ))\n g_grads = g_trainer.compute_gradients(model.g_loss, var_list=(\n model.g_vars + model.q_vars\n ))\n update_d = d_trainer.apply_gradients(d_grads)\n update_g = g_trainer.apply_gradients(g_grads)\n\n # main training session context\n with session:\n if config.resume:\n epoch_start = (\n utils.load_checkpoint(session, model, config)\n // DATASET_LENGTH_GETTERS[config.dataset]()\n ) + 1\n else:\n epoch_start = 1\n session.run(tf.global_variables_initializer())\n\n for epoch in range(epoch_start, config.epochs+1):\n dataset = DATASETS[config.dataset](config.batch_size)\n dataset_length = DATASET_LENGTH_GETTERS[config.dataset]()\n dataset_stream = tqdm(enumerate(dataset, 1))\n\n for batch_index, xs in dataset_stream:\n # where are we?\n iteration = (epoch-1)*dataset_length + batch_index\n\n # run the discriminator trainer.\n zs_d, cs_d = session.run([\n model.z_distribution.sample_prior(config.batch_size),\n model.c_distribution.sample_prior(config.batch_size)\n ])\n _, d_loss, estimated_mutual_information = session.run([\n update_d, model.d_loss,\n model.estimated_mutual_information_between_c_and_c_given_g,\n ], feed_dict={\n model.z_in: zs_d,\n model.c_in: cs_d,\n model.image_in: xs\n })\n\n # run the generator trainer.\n for _ in range(config.generator_update_ratio):\n zs_g, cs_g = session.run([\n model.z_distribution.sample_prior(config.batch_size),\n model.c_distribution.sample_prior(config.batch_size)\n ])\n _, g_loss = session.run(\n [update_g, model.g_loss], feed_dict={\n model.z_in: zs_g,\n model.c_in: cs_g\n }\n )\n\n dataset_stream.set_description((\n 'epoch: {epoch}/{epochs} | '\n 'iteration: {iteration} | '\n 'progress: [{trained}/{total}] ({progress:.0f}%) | '\n 'g loss: {g_loss:.3f} | '\n 'd loss: {d_loss:.3f} | '\n 'I(c;c|g): {estimated_mutual_information:.3f} '\n ).format(\n epoch=epoch,\n epochs=config.epochs,\n iteration=iteration,\n trained=batch_index*config.batch_size,\n total=dataset_length,\n progress=(\n 100.\n * batch_index\n * config.batch_size\n / dataset_length\n ),\n g_loss=g_loss,\n d_loss=d_loss,\n estimated_mutual_information=estimated_mutual_information,\n ))\n\n # log the generated samples.\n if iteration % config.image_log_interval == 0:\n zs, cs = session.run([\n model.z_distribution.sample_prior(config.batch_size),\n model.c_distribution.sample_prior(config.batch_size)\n ])\n summary_writer.add_summary(session.run(\n image_summary, 
feed_dict={\n model.z_in: zs,\n model.c_in: cs,\n }\n ), iteration)\n\n # log the statistics.\n if iteration % config.statistics_log_interval == 0:\n zs, cs = session.run([\n model.z_distribution.sample_prior(config.batch_size),\n model.c_distribution.sample_prior(config.batch_size)\n ])\n summary_writer.add_summary(session.run(\n statistics_summaries, feed_dict={\n model.z_in: zs,\n model.c_in: cs,\n model.image_in: xs\n }\n ), iteration)\n\n # save the model.\n if iteration % config.checkpoint_interval == 0:\n utils.save_checkpoint(session, model, iteration, config)\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.summary.image", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.summary.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Oshlack/Slinker
[ "725d2c0861156034ef4d16293e2a3b74ac23c9e7" ]
[ "tools/Canvas/Canvas/track/coverage.py" ]
[ "#=======================================================================================================================\n#\n# CANVAS\n# Author: Breon Schmidt\n# License: MIT\n#\n#=======================================================================================================================\n\n''' --------------------------------------------------------------------------------------------------------------------\nImports\n---------------------------------------------------------------------------------------------------------------------'''\n\n''' External '''\nimport plotly.graph_objects as go\nimport numpy as np\n\n''' --------------------------------------------------------------------------------------------------------------------\nClasses\n---------------------------------------------------------------------------------------------------------------------'''\n\nclass Coverage():\n\n\t\"\"\"\n\tA class that represents a bam file and associated information.\n\n\t...\n\n\tMethods\n\t-------\n\n\t\"\"\"\n\n\tdef __init__(self, sample, cov_range=False):\n\t\tself.sample = sample\n\t\tself.cov_range = cov_range\n\n\tdef _assign_block(self, block, coverage_array, region, reads, offset):\n\n\t\t\"\"\"Build up coverage by adding 1 across each, gapless block.\"\"\"\n\n\t\tstart = block[0] - region[\"start\"] + offset\n\t\tend = block[1] - region[\"start\"] + offset\n\t\tsize = region[\"end\"] - region[\"start\"]\n\n\t\tstart_in = (0 <= start <= size)\n\t\tend_in = (0 <= end <= size)\n\n\t\tif start_in and end_in:\n\t\t\tcoverage_array[start: end] += 1\n\t\t\treads += 1\n\t\telif start_in: # 5' region of read in region\n\t\t\tcoverage_array[start: size] += 1\n\t\t\treads += 1\n\t\telif end_in: # 3' region of read in region\n\t\t\tcoverage_array[0: end] += 1\n\t\t\treads += 1\n\n\t\treturn coverage_array, reads\n\n\n\tdef get(self, coverage_array, region, offset=False, log=False, cpm=False):\n\n\t\treads = 0\n\t\tcount = 0\n\n\t\tfor read in self.sample.sam.fetch(str(region[\"chr\"]), start=region[\"start\"], end=region[\"end\"]):\n\t\t\tblocks = read.get_blocks()\n\t\t\tcount += 1\n\t\t\tfor block in blocks:\n\t\t\t\tcoverage_array, reads = self._assign_block(block, coverage_array, region, reads, offset)\n\n\t\tif reads == 0:\n\t\t\tprint(\"Warning: No reads within supplied region for\", self.sample.name)\n\n\t\treturn coverage_array\n\n\tdef print(self, canvas, region, log=False, cpm=True, row=1, col=1,\n\t\t\t hover_template='<b>Coverage</b>: %{y}' + '<br/><b>Coord</b>: %{x}', coord_map=False,\n\t\t\t line=\"rgba(87, 22, 162, 0.5)\", fill=\"rgba(115, 29, 216, 0.5)\"):\n\n\t\t''' Print a coverage track onto the supplied canvas plot. 
'''\n\n\t\t''' Get coverage histogram '''\n\n\t\tx = np.array(range(region[\"start\"], region[\"end\"]))\n\t\ty = np.zeros(region[\"end\"]-region[\"start\"])\n\n\t\tchrom = region[\"chr\"] if isinstance(region[\"chr\"], list) else [region[\"chr\"]]\n\t\tfor i in range(0, len(chrom)):\n\t\t\ty = self.get(y, {\"chr\": chrom[i],\n\t\t\t\t\t\t\t \"start\": region[\"start\"],\n\t\t\t\t\t\t\t \"end\": region[\"end\"]},\n\t\t\t\t\t\t offset = region[\"offset\"][i],\n\t\t\t\t\t\t log=log, cpm=cpm)\n\n\t\tif cpm and not log:\n\t\t\ty = y / self.sample.lib_size * 1000000\n\t\telif cpm and log:\n\t\t\ty = np.log2(y + 1) / self.sample.lib_size * 1000000\n\t\telif log:\n\t\t\ty = np.log2(y + 1)\n\n\t\tself.cov_range = max(y)\n\n\t\tif not isinstance(coord_map, bool):\n\t\t\talt_x = []\n\t\t\tfor st_pos in x:\n\t\t\t\talt_x.append(coord_map[coord_map == st_pos].index.values[0])\n\t\telse:\n\t\t\talt_x = []\n\n\t\t''' Print Coverage '''\n\t\tcanvas.append_trace(go.Scatter(x=x, y=y, text=alt_x, fill='tozeroy', showlegend=False,\n\t\t\t\t\t\t\t\t\t hovertemplate=hover_template, fillcolor=fill, line_color=line, line_shape='hv'),\n\t\t\t\t\t\t\trow=row, col=col)\n\n\t\tcanvas.update_yaxes(title_text=self.sample.name, row=row, col=col,\n\t\t\t\t\t\t\ttitle_font=dict(size=12, color='white'), tickcolor='white',\n\t\t\t\t\t\t\ttickfont=dict(color='white', size=12), range=(0, max(y)), autorange=False)\n\n\t\tcanvas.update_xaxes(visible=False, row=row, col=col)\n\n\n" ]
[ [ "numpy.log2", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jooohhn/venezuelan-economic-analysis
[ "b61559c385677f7023240655ae636a732b0d21dd" ]
[ "stats.py" ]
[ "import csv\nimport numpy as np\nimport pandas as pd\n\ndatapath = './data/Per capita GDP at current prices - US Dollars.csv'\ndf = pd.read_csv(datapath, header=0)\ndf = df.sort_values('Year', ascending=True)\ndf = df[df['Country or Area']=='Venezuela (Bolivarian Republic of)']\ndf = df[['Year', 'Value']]\n\n# Prints summary stats for Venezuelan GDP 1970-2017 (Table1)\nprint('-------------Venezuelan Per Capita GDP % Change, 1970-2017 (Current US Dollars)----------------')\nprint(df['Value'].pct_change(axis='rows').describe())\nprint('')\n# Prints summary stats for Venezuelan GDP 1999 - 2013 (Table 1)\ndf2 = df[df['Year'] >= 1999]\ndf2 = df2[df2['Year'] <= 2013]\nprint('-------------Venezuelan Per Capita GDP % Change, 1999-2013 (Current US Dollars)----------------')\nprint(df2['Value'].pct_change(axis='rows').describe())\nprint('')\n# Prints summary stats for Venezuelan GDP 2013 - 2017 (Table 1)\nprint('-------------Venezuelan Per Capita GDP % Change, 2013-2017 (Current US Dollars)----------------')\ndf3 = df[df['Year'] >= 2013]\nprint(df3['Value'].pct_change(axis='rows').describe())\nprint('')\n\n# Prints summary stats for world GDP growth in 2017 (Table 2)\ndf = pd.read_csv(datapath, header=0)\ndf = df[df['Year'] >= 2016]\ndf = df.dropna()\ndf = df[df.duplicated(subset=['Country or Area'], keep=False)]\nobj = {'Country': [], '2016' : [], '2017' : []}\nfor index, row in df.iterrows():\n if row['Country or Area'] not in obj['Country']:\n obj['Country'].append(row['Country or Area'])\n if row['Year'] == 2016:\n obj['2016'].append(row['Value'])\n if row['Year'] == 2017:\n obj['2017'].append(row['Value'])\ndf = pd.DataFrame(data={'2016': obj['2016'], '2017': obj['2017']}, index=obj['Country'])\ndf = df.rename(columns={'2017': 'Value'})\nprint('------------- International Per Capita GDP % Change, 1970-2017 (Current US Dollars)----------------')\nprint(df.pct_change(axis='columns')['Value'].describe())\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
melloddy/public_data_extraction
[ "3932ecad1c187204555087bd85e3b8bc0fc5c6ec" ]
[ "processing/assay_processing.py" ]
[ "from typing import List\nfrom pandas import Series, DataFrame, cut, concat\nfrom numpy import log10, power\n\n\ndef categorize_assay(assay_type: str) -> str:\n if assay_type in ['A', 'P']:\n return 'ADME'\n if assay_type == 'T':\n return 'PANEL'\n return 'OTHER'\n\n\ndef nanmol_to_log(value: float) -> float:\n return -log10(value * (10**-9))\n\n\ndef log_to_nanomol(value: float) -> float:\n return power(10, 9 - value)\n\n\ndef bin_assay_values(\n values: Series,\n ascending: bool = False,\n from_median: bool = False,\n min_bin_size: int = 25,\n max_splits: int = 5,\n) -> (List[float], List[DataFrame]):\n thresholds = list()\n binned_dataframes = list()\n\n if from_median:\n median = values.median()\n max_splits_ = max_splits - 1\n potential_thresholds_ = [median]\n max_splits_upper = max_splits_ // 2 + max_splits_ % 2\n max_splits_lower = max_splits_ - max_splits_upper\n potential_thresholds_.extend([median + i for i in range(1, 1 + max_splits_upper)])\n potential_thresholds_.extend([median - i for i in range(1, 1 + max_splits_lower)])\n else:\n if ascending:\n start = values.min()\n step = 1\n else:\n start = values.max()\n step = -1\n potential_thresholds_ = [start + i * step for i in range(1, 1 + max_splits)]\n\n potential_thresholds = filter(\n lambda x: x < values.max() and x > values.min(),\n potential_thresholds_\n )\n for threshold in potential_thresholds:\n bins = cut(\n values,\n bins=[values.min(), threshold, values.max()],\n labels=[0, 1],\n include_lowest=True,\n )\n nb_ones = bins.astype(int).sum()\n if nb_ones >= min_bin_size and len(values) - nb_ones >= min_bin_size:\n thresholds.append(threshold)\n binned_dataframes.append(\n DataFrame({'values': values, f'bins': bins})\n )\n return thresholds, binned_dataframes\n\n\ndef regression_extract(df: DataFrame) -> (List, List):\n # Getting the mapping assay_id => regression_task_id\n task_ids = {assay: i for i, assay in enumerate(df.assay_id.unique())}\n temp_df = df.rename(columns={'chembl_id': 'input_compound_id'})\n temp_df['regression_task_id'] = temp_df['assay_id'].map(task_ids)\n\n # Extracting T4\n t4_df = temp_df[[\n 'input_compound_id',\n 'regression_task_id',\n 'standard_value',\n 'standard_relation',\n 'standard_units',\n 'standard_type',\n ]]\n\n # Extracting T3\n t3_df = temp_df[['assay_id', 'regression_task_id', 'assay_type', 'target_id']]\\\n .rename(columns={'assay_id': 'input_assay_id'})\\\n .drop_duplicates(subset=['regression_task_id'])\\\n .reset_index(drop=True)\n t3_df['assay_type'] = t3_df['assay_type'].apply(categorize_assay)\n\n return t3_df, t4_df\n\n\ndef bin_assays(df: DataFrame) -> (List, List, List):\n assays = df.assay_id.unique()\n t3_data = list()\n t4_data = list()\n assay_ids_to_drop = list()\n task_counter = 0\n for assay in assays:\n df_ = df[df['assay_id'] == assay]\n category = categorize_assay(df_.iloc[0].assay_type)\n target_id = df_.iloc[0].target_id\n df_ = df_[['chembl_id', 'standard_value']].set_index('chembl_id')\n df_['log_values'] = df_.standard_value.apply(nanmol_to_log)\n thresholds, binned_dfs = bin_assay_values(df_.log_values, from_median=True)\n nb_splits = len(thresholds)\n if nb_splits > 0:\n weight = 1 / nb_splits\n for threshold, binned_df in zip(thresholds, binned_dfs):\n task_counter += 1\n t4_data.append({\n 'input_compound_id': binned_df.index,\n 'classification_task_id': [task_counter]*binned_df.shape[0],\n 'class_label': binned_df.bins,\n })\n t3_data.append({\n 'classification_task_id': task_counter,\n 'input_assay_id': assay,\n 'assay_type': category,\n 
'target_id': target_id,\n 'threshold_column': 'standard_value',\n 'threshold_value': log_to_nanomol(threshold),\n 'threshold_operator': '>=',\n 'weight': weight,\n })\n else:\n assay_ids_to_drop.append(assay)\n return t3_data, t4_data, assay_ids_to_drop\n\n\ndef delete_assays(\n ids_to_delete: List[int],\n df: DataFrame,\n column_name: str = 'assay_id',\n) -> DataFrame:\n df_to_drop = df[df[column_name].isin(ids_to_delete)]\n return df.drop(df_to_drop.index)\n\n\ndef build_t2(df: DataFrame) -> DataFrame:\n t2_df = df[['chembl_id', 'canonical_smiles']].rename(\n columns={\n 'chembl_id': 'input_compound_id',\n 'canonical_smiles': 'smiles',\n }\n )\n t2_df.drop_duplicates(inplace=True)\n return t2_df\n\n\ndef build_t3(t3_data: List) -> DataFrame:\n t3_df = DataFrame(t3_data)\n return t3_df\n\n\ndef build_t4(t4_data: List) -> DataFrame:\n t4_df = concat([DataFrame(d) for d in t4_data])\n return t4_df\n" ]
[ [ "numpy.log10", "pandas.DataFrame", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
dig-data-r3/big-data-first-project
[ "2d2443e4e9a4596ef1a61324a5a80e7b81b5a68c" ]
[ "generate_datasets.py" ]
[ "#!/usr/bin/env python3\n\nimport pandas as pd\n\n\ndef sample_all_sizes(dataset):\n for size in dataset_sizes:\n n_rows = round(dataset.shape[0] * size)\n sampled_df = dataset.sample(n=n_rows, random_state=42, replace=True)\n filename = 'dataset/historical_stock_prices{}.csv'.format(int(size*2048))\n sampled_df.to_csv(filename, index=False)\n\n\ndataset_sizes = [0.125, 0.25, 0.5, 2]\nstock_prices = pd.read_csv('dataset/historical_stock_prices.csv')\nsample_all_sizes(stock_prices)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jingshenSN2/HOIP_bandgap_prediction
[ "fc989c47e20d98490d7d9679cb6f9a6b2b0b9ebc" ]
[ "training/MLP.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.neural_network import MLPRegressor as mlp\nfrom sklearn.model_selection import GridSearchCV as gs\nfrom preprocessing import drop_preprocessing as pre\nimport joblib\nimport os\n\ndef MLP(data_directory, model_dir, features):\n X_train, X_test, y_train, y_test, predict_X, features = pre(data_directory, features)\n os.chdir(model_dir)\n model = mlp(random_state=1, max_iter=10000)\n grid = gs(estimator=model,\n param_grid={'hidden_layer_sizes': [(500, 500)], 'activation':\n ['logistic', 'tanh', 'relu'], 'alpha': np.exp(2.303 * np.arange(-8, 0)),\n 'learning_rate': ['constant']}, cv=5, n_jobs=6)\n grid.fit(X_train, y_train)\n print(grid.best_params_)\n print(grid.best_estimator_.score(X_test, y_test))\n\n joblib.dump(grid.best_estimator_, 'mlp_%d_%.4f.m'%(len(features),grid.best_estimator_.score(X_test, y_test)))\n\n df = pd.DataFrame(columns=['ml_bandgap', 'pbe_bandgap'])\n df['pbe_bandgap'] = y_test\n df['ml_bandgap'] = grid.best_estimator_.predict(X_test)\n print(df)" ]
[ [ "numpy.arange", "pandas.DataFrame", "sklearn.neural_network.MLPRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tardani95/iris-distro
[ "dbb1ebbde2e52b4cc747b4aa2fe88518b238071a" ]
[ "src/python/irispy/test/python_interface_test.py" ]
[ "from __future__ import print_function\n\nimport irispy\nimport irispy.iris_wrapper\nimport numpy as np\n\n\ndef testInterface(show=False):\n p2 = irispy.Polyhedron()\n p2.setA(np.eye(2))\n p2.setB(np.array([3.0, 4.0]))\n print(p2.contains(np.array([2.5, 5.5]), 0.0))\n\n p3 = irispy.Polyhedron.fromBounds([-1, -1], [2, 2])\n\n problem = irispy.IRISProblem(2)\n problem.setBounds(irispy.Polyhedron.fromBounds([-1, -1], [2, 2]))\n problem.setSeedPoint(np.array([0.0, 0.0]))\n problem.addObstacle(np.array([[1.5, 2], [1.5, 2]]))\n region = irispy.iris_wrapper.inflate_region(problem, irispy.IRISOptions())\n print(region)\n print(region.getPolyhedron().generatorPoints())\n print(region.getEllipsoid().getC())\n print(region.getEllipsoid().getD())\n\n import matplotlib.pyplot as plt\n region.polyhedron.draw2d()\n region.ellipsoid.draw2d()\n plt.gca().set_xlim([-1.5, 2.5])\n plt.gca().set_ylim([-1.5, 2.5])\n if show:\n plt.show()\n\nif __name__ == '__main__':\n testInterface(True)\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.eye", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jf20541/LogisticRegressionPyTorch
[ "1900a85bec7c4b1a6a4693625f52083c46df1409" ]
[ "src/train.py" ]
[ "import pandas as pd\nfrom model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, roc_auc_score\nimport argparse\nimport joblib\nimport os\nimport config\n\n\ndef run(fold):\n df = pd.read_csv(config.TRAIN_FOLDS)\n df_train = df[df.kfold != fold].reset_index(drop=True)\n df_test = df[df.kfold == fold].reset_index(drop=True)\n\n # drop the label column from df and convert to numpy array\n x_train = df_train.drop(\"RainTomorrow\", axis=1).values\n y_train = df_train.RainTomorrow.values\n x_test = df_test.drop(\"RainTomorrow\", axis=1).values\n y_test = df_test.RainTomorrow.values\n\n # initiate the Logistic Regression model\n model = LogisticRegression(0.0001, 200)\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n acc = accuracy_score(y_test, pred) * 100\n print(f\"Logistic Regression Accuracy: {acc:0.2f}% for Fold={fold}\")\n # save the model\n joblib.dump(model, os.path.join(config.MODEL_PATH, f\"LR_fold{fold}.bin\"))\n\n\nif __name__ == \"__main__\":\n # initializing Argument Parser class\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--fold\", type=int)\n # read the arguments from CL\n args = parser.parse_args()\n # run the fold specified by CL arguments\n run(fold=args.fold)\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
kad-ecoli/rateratio_test
[ "a82596d94ad790a6da717a3758acd6af956a94e3" ]
[ "p_adjust.py" ]
[ "#!/usr/bin/env python\ndocstring='''\np.adjust: P-values for Multiple Comparisons\n\nDescription\n Given a set of p-values, return q-values (i.e. p-values adjusted).\n This module is a python re-implement of p.adjust R package.\n\nUsage\n from p_adjust import adjust as p_adjust\n p_adjust(p, method, n)\n\nArguments\n p numpy array of p-values\n method correction method. default: fdr\n n number of comparisons, must be at least len(p); only set\n this (to non-default) when you know what you are doing!\n default: len(p)\n\nDetails\n The adjustment methods include the Bonferroni correction (\"bonferroni\")\n in which the p-values are multiplied by the number of comparisons. Less\n conservative corrections are also included by Benjamini & Hochberg\n (1995) (\"BH\" or its alias \"fdr\"). A pass-through option (\"none\") is also \n included. The set of methods are contained in the \"p_adjust_methods\"\n list for the benefit of methods that need to have the method as an option\n and pass it on to \"p_adjust\".\n\n The Bonferroni method is designed to give strong control of the\n family-wise error rate.\n\n The \"BH\" (aka \"fdr\") method of Benjamini & Hochberg controls the false \n discovery rate, the expected proportion of false discoveries amongst\n the rejected hypotheses. The false discovery rate is a less stringent\n condition than the family-wise error rate, so these methods are more\n powerful than the others.\n\n Note that you can set 'n' larger than 'len(n)' which means the\n unobserved p-values are assumed to be greater than all the observed\n p for 'bonferroni' methods and equal to 1 for the other methods.\n\nValue\n A numpy array of corrected p-values (of the same length as 'p').\n\nReferences\n Benjamini, Y., and Hochberg, Y. (1995). Controlling the false\n discovery rate: a practical and powerful approach to multiple\n testing. Journal of the Royal Statistical Society Series B\n 57, 289-300.\n\nExamples\n from p_adjust import p_adjust\n p_adjust([0.03,0.2,0.4], method=\"fdr\")\n'''\n\nimport numpy as np\nfrom scipy import interpolate\n\np_adjust_methods=[\"fdr\",\"bonferroni\",\"none\"]\n\n# This function is partly based on Nicolo Fusi's qvalue code at\n# https://github.com/nfusi/qvalue/blob/master/qvalue/qvalue.py\ndef p_adjust(p, method=\"fdr\", n=None):\n '''\n p - numpy array of p-values\n method - correction method. default: fdr\n n - number of comparisons, must be at least len(p); only set\n this (to non-default) when you know what you are doing!\n default: len(p)\n '''\n method=str(method).lower()\n if method==\"bh\":\n method=\"fdr\"\n assert(method in p_adjust_methods\n ), \"ERROR! method must be one of \"+','.join(p_adjust_methods)\n\n if isinstance(p,list):\n p=np.array(p)\n\n if method==\"none\":\n return p\n\n original_shape = p.shape\n p = p.ravel() # flattens the array in place\n\n if n>0:\n n *= 1.\n else:\n n = 1.*len(p)\n\n if method==\"bonferroni\":\n qv = n * p\n qv[qv>1]=1.\n qv = qv.reshape(original_shape)\n return qv\n\n # if the number of hypotheses is small, just set pi0 to 1\n if len(p) < 100:\n pi0 = 1.0\n else:\n # evaluate pi0 for different lambdas\n pi0 = []\n lam = np.arange(0, 0.90, 0.01)\n counts = np.array([(p > i).sum() for i in np.arange(0, 0.9, 0.01)])\n for l in range(len(lam)):\n pi0.append(counts[l]/(n*(1-lam[l])))\n\n pi0 = np.array(pi0)\n\n # fit natural cubic spline\n tck = interpolate.splrep(lam, pi0, k=3)\n pi0 = interpolate.splev(lam[-1], tck)\n\n if pi0 > 1:\n pi0 = 1.0\n if pi0 < 0:\n print(\"WARNING! 
pi0 is not between 0 and 1: %f\" % pi0)\n\n    p_ordered = np.argsort(p)\n    p = p[p_ordered]\n    qv = pi0 * n/len(p) * p\n    qv[-1] = min(qv[-1], 1.0)\n\n    for i in range(len(p)-2, -1, -1):\n        qv[i] = min(pi0*n*p[i]/(i+1.0), qv[i+1])\n\n    # reorder qvalues\n    qv_temp = qv.copy()\n    qv = np.zeros(qv.shape)\n    qv[p_ordered] = qv_temp\n\n    # reshape qvalues\n    qv = qv.reshape(original_shape)\n    return qv\n\nif __name__==\"__main__\":\n    print(docstring)\n" ]
[ [ "scipy.interpolate.splrep", "numpy.arange", "scipy.interpolate.splev", "numpy.argsort", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Redoblue/deep-person-reid
[ "5ede06fcffb80672a19a81b065a604ece78dadb4" ]
[ "torchreid/dataset_loader.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nfrom PIL import Image\nimport numpy as np\nimport os.path as osp\nimport io\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndef read_image(img_path):\n \"\"\"Keep reading image until succeed.\n This can avoid IOError incurred by heavy IO process.\"\"\"\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img\n\n\nclass ImageDataset(Dataset):\n \"\"\"Image Person ReID Dataset\"\"\"\n def __init__(self, dataset, transform=None):\n self.dataset = dataset\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img_path, pid, camid = self.dataset[index]\n img = read_image(img_path)\n \n if self.transform is not None:\n img = self.transform(img)\n \n return img, pid, camid, img_path\n\n\nclass VideoDataset(Dataset):\n \"\"\"Video Person ReID Dataset.\n Note batch data has shape (batch, seq_len, channel, height, width).\n \"\"\"\n _sample_methods = ['evenly', 'random', 'all']\n\n def __init__(self, dataset, seq_len=15, sample_method='evenly', transform=None):\n self.dataset = dataset\n self.seq_len = seq_len\n self.sample_method = sample_method\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img_paths, pid, camid = self.dataset[index]\n num = len(img_paths)\n\n if self.sample_method == 'random':\n \"\"\"\n Randomly sample seq_len items from num items,\n if num is smaller than seq_len, then replicate items\n \"\"\"\n indices = np.arange(num)\n replace = False if num >= self.seq_len else True\n indices = np.random.choice(indices, size=self.seq_len, replace=replace)\n # sort indices to keep temporal order (comment it to be order-agnostic)\n indices = np.sort(indices)\n \n elif self.sample_method == 'evenly':\n \"\"\"\n Evenly sample seq_len items from num items.\n \"\"\"\n if num >= self.seq_len:\n num -= num % self.seq_len\n indices = np.arange(0, num, num/self.seq_len)\n else:\n # if num is smaller than seq_len, simply replicate the last image\n # until the seq_len requirement is satisfied\n indices = np.arange(0, num)\n num_pads = self.seq_len - num\n indices = np.concatenate([indices, np.ones(num_pads).astype(np.int32)*(num-1)])\n assert len(indices) == self.seq_len\n \n elif self.sample_method == 'all':\n \"\"\"\n Sample all items, seq_len is useless now and batch_size needs\n to be set to 1.\n \"\"\"\n indices = np.arange(num)\n \n else:\n raise ValueError(\"Unknown sample method: {}. Expected one of {}\".format(self.sample_method, self._sample_methods))\n\n imgs = []\n for index in indices:\n img_path = img_paths[int(index)]\n img = read_image(img_path)\n if self.transform is not None:\n img = self.transform(img)\n img = img.unsqueeze(0)\n imgs.append(img)\n imgs = torch.cat(imgs, dim=0)\n\n return imgs, pid, camid\n" ]
[ [ "numpy.random.choice", "torch.cat", "numpy.arange", "numpy.sort", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
geflaspohler/poold
[ "4ddd085597b85e8c737b2f904f504437f728513b" ]
[ "poold/utils/visualize.py" ]
[ "\"\"\" Visualizations for online learning.\n\nFor example:\n import poold\n poold.visualize(learner.history)\n plt.show()\n\n\"\"\"\n# System imports\nimport numpy as np\nimport pandas as pd\nimport copy\nimport os\n\n# Plotting imports\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.gridspec as gridspec\nfont = {'family' : 'lmodern',\n 'weight' : 'normal',\n 'size' : 50}\ntext = {'usetex' : True}\nmatplotlib.rc('font',**font)\nmatplotlib.rc('text',**text)\nmatplotlib.rcParams['text.latex.preamble']=r\"\\usepackage{amsmath}\"\n\nimport seaborn as sns\nsns.set(font_scale=1.6)\nsns.set_style(\"white\")\n\ndef visualize_multiple(experiment_list, style_algs, subset_time=None, filename=\"temp\"):\n num_plots = len(experiment_list)\n fig_r, ax_r = plt.subplots(1, 1, figsize=(10, 4), sharex=False)\n fig_p, ax_p = plt.subplots(1, 1, figsize=(5, 4), sharex=False)\n fig_w, ax_w = plt.subplots(1, num_plots, figsize=(4*num_plots, 4), sharex=False)\n\n df_losses = None\n for i, (targets, regret_periods, model_alias, history) in enumerate(experiment_list):\n df = visualize(history, regret_periods, targets, model_alias, style_algs, ax=[ax_w[i], ax_r, ax_p], subset_time=subset_time, legend=(i==0))\n if df_losses is None:\n df_losses = copy.copy(df)\n else:\n df_losses = df_losses.merge(df)\n mean_losses = df_losses.mean(axis=0)\n\n # Save dataframe in latex table format\n fname = f\"./eval/losses_{filename}.tex\"\n mean_losses.to_latex(fname, float_format=\"%.3f\", longtable=False)\n\n fig_w.tight_layout()\n fig_w.subplots_adjust(top=0.95, wspace=0, hspace=0)\n fig_r.tight_layout()\n fig_r.subplots_adjust(top=0.95)\n fig_p.tight_layout()\n fig_p.subplots_adjust(top=0.95)\n\n if not os.path.exists('figs'):\n os.mkdir('figs')\n filename_w = f\"./figs/weights_{filename}.pdf\"\n filename_r = f\"./figs/regret_{filename}.pdf\"\n filename_p = f\"./figs/params_{filename}.pdf\"\n fig_w.savefig(filename_w, bbox_inches='tight')\n fig_r.savefig(filename_r, bbox_inches='tight')\n fig_p.savefig(filename_p, bbox_inches='tight')\n\n return mean_losses\n\ndef visualize(history, regret_periods=None, time_labels=None, model_labels={}, \n style_algs={}, ax=[None, None, None], params=[\"lam\"], subset_time=None, legend=True):\n \"\"\" Visualize online learning losses, weights, and parameters.\n\n Args:\n history (History): online learning History object\n regret_periods (list[tuple]): list of tuples specifying the start (inclusive) and end\n points (not inclusive) of regret periods\n time_labels (list): list of labels for the time periods\n model_labels (dict): dictionary of model labels\n style_algs (dict): dictionary of model styles\n ax (list[ax]): list of axis objects for plotting the weights, regret, and parameter\n plots respectively.\n params (list[str]): list of parameters to plot\n subset_time (tuple): plot values from times[subset_time[0]:subset_time[1]]\n legend (bool): if True, plot legend.\n \"\"\"\n times = history.get_times()\n if time_labels is None:\n time_labels = range(len(times))\n\n if subset_time is not None:\n times = times[subset_time[0]:subset_time[1]]\n time_labels = time_labels[subset_time[0]:subset_time[1]]\n \n if regret_periods is None:\n regret_periods = [(0, len(times))]\n\n if subset_time is not None:\n subset_regret_periods = []\n for s, e in regret_periods:\n if s in times and e in times:\n subset_regret_periods.append((times.index(s), times.index(e)))\n elif s in times:\n subset_regret_periods.append((times.index(s), 
len(times)))\n elif e in times:\n subset_regret_periods.append((0, times.index(e)))\n regret_periods = subset_regret_periods\n\n assert(len(time_labels) == len(times))\n\n df_losses = pd.DataFrame(columns=history.models+[\"online_learner\"], index=time_labels)\n df_weights = pd.DataFrame(columns=history.models, index=time_labels)\n \n param = history.get_params(0)\n if len(param) > 0:\n param_labels = list(set(params).intersection(set(param.keys())))\n df_params = pd.DataFrame(columns=param_labels, index=time_labels)\n else:\n df_params = None\n\n for t, time in enumerate(times):\n loss_obj, loss_learner, loss_grad = history.get_loss(time)\n play_learner = history.get_play(time, return_past=False)\n loss_learner = loss_obj['fun'](w=play_learner)\n params_learner = history.get_params(time)\n\n loss_all = loss_obj.get('exp', {})\n loss_all['online_learner'] = loss_learner\n\n # Assign loss and weight dataframe\n df_losses.iloc[t] = loss_all\n df_weights.iloc[t] = dict(zip(history.models, play_learner))\n if df_params is not None:\n df_params.iloc[t] = params_learner\n\n plot_weights(df_weights, regret_periods, model_labels, style_algs, ax[0], legend, subset_time)\n if not df_losses[history.models].isna().all(axis=None):\n plot_regret(df_losses, regret_periods, model_labels, style_algs, history.models, ax[1], only_learner=True, subset_time=subset_time)\n if df_params is not None:\n plot_params(df_params, regret_periods, model_labels[\"online_learner\"], style_algs, ax[2], subset_time=subset_time)\n\n return df_losses.rename({\"online_learner\": model_labels[\"online_learner\"]}, axis=1)\n\ndef plot_weights(df, regret_periods, model_labels, style_algs, ax=None, legend=True, subset_time=None):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(5, 4), sharex=False)\n\n for m in df.columns:\n alias = model_labels.get(m, m)\n style = style_algs.get(alias, {})\n alias = alias.replace(\"_\", \"\\_\")\n ax.plot(df.index, df[m], label=alias, **style)\n\n ax.set_title(model_labels.get(\"online_learner\", \"Online Learner\").replace(\"_\", \"\\_\") + \" weights $\\mathbf{w}_t$\")\n ax.set_ylim([0.0, 1.0])\n\n if legend:\n handles, labels = ax.get_legend_handles_labels()\n # sort both labels and handles by labels\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n ax.legend(handles, labels, prop={'size': 15}, ncol=1, loc='best')\n else:\n ax.set_yticks([])\n ticks = ax.get_xticks()\n # ax.set_xticks(ticks[0:-1])\n\n if subset_time is not None:\n # Date based formatting\n if isinstance(df.index, pd.DatetimeIndex):\n datefmt = mdates.DateFormatter('%b')\n ax.xaxis.set_major_formatter(datefmt)\n\n plot_time_seperators(regret_periods, df.index, ax)\n\ndef plot_regret(df, regret_periods, model_labels, style_algs, input_models, ax=None, only_learner=False, subset_time=None):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(5, 4), sharex=False)\n\n # Find best model per regret period\n if only_learner:\n model_list = [\"online_learner\"]\n else:\n model_list = list(df.columns)\n\n mean_loss = df.mean(axis=0)\n zeros = pd.Series(0, index=df.index)\n df_regret = pd.DataFrame(columns=model_list, index=df.index)\n for s, e in regret_periods:\n cumsum = df.iloc[s:e].cumsum(axis=0)\n best_model = pd.to_numeric(cumsum[input_models].iloc[-1]).idxmin()\n relative_cumsum = cumsum - cumsum[[best_model]].values\n df_regret.loc[relative_cumsum.index, :] = relative_cumsum[model_list].values\n\n for m in df_regret.columns:\n alias = model_labels.get(m, m)\n style = style_algs.get(alias, 
{})\n alias = alias.replace(\"_\", \"\\_\")\n\n label = f\"{alias} \\t (RMSE: {mean_loss[m]: .3f})\" \n ax.plot(df_regret.index, df_regret[m], label=label, **style)\n\n ax.plot(zeros.index, zeros, c='k', linestyle=\"-\")\n ax.set_title(\"Cumulative regret (RMSE loss)\")\n\n # Sort both labels and handles by labels\n handles, labels = ax.get_legend_handles_labels()\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n ax.legend(handles, labels, prop={'size': 15}, ncol=2, loc='best')\n\n plot_time_seperators(regret_periods, df.index, ax)\n\n if subset_time is not None:\n # Date based formatting\n if isinstance(df.index, pd.DatetimeIndex):\n datefmt = mdates.DateFormatter('%b')\n ax.xaxis.set_major_formatter(datefmt)\n\ndef plot_params(df, regret_periods, model_alias, style_algs, ax=None, subset_time=None):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(5, 4), sharex=False)\n\n for m in df.columns:\n style = style_algs.get(model_alias, {})\n model_alias = model_alias.replace(\"_\", \"\\_\")\n final_val = df.iloc[-1][\"lam\"]\n label = f\"{model_alias}\" + \"\\t ($\\lambda_{T}$ = \" + f\"{final_val: .3f})\"\n ax.plot(df.index, df[m], label=label, **style)\n\n ticks = ax.get_xticks()\n # ticks[-1] = ticks[-2]\n ax.set_xticks(ticks)\n\n ax.set_title(\"Regularization $\\lambda_t$\")\n\n handles, labels = ax.get_legend_handles_labels()\n # sort both labels and handles by labels\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n ax.legend(handles, labels, prop={'size': 15}, ncol=1, loc='best')\n\n\n plot_time_seperators(regret_periods, df.index, ax)\n\n if subset_time is not None:\n # Date based formatting\n if isinstance(df.index, pd.DatetimeIndex):\n datefmt = mdates.DateFormatter('%b')\n ax.xaxis.set_major_formatter(datefmt)\n\ndef plot_time_seperators(regret_periods, index, ax):\n ''' Local utiliy function for plotting vertical time seperators '''\n for start, end in regret_periods:\n start_time = index[start]\n if end == len(index):\n end -= 1\n elif end > len(index):\n raise ValueError(\"Bad time seperator\", start, end)\n end_time = index[end]\n ax.axvline(x=start_time, c='k', linestyle='-.', linewidth=1.0)" ]
[ [ "matplotlib.dates.DateFormatter", "pandas.Series", "matplotlib.pyplot.subplots", "pandas.DataFrame", "pandas.to_numeric", "matplotlib.rc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
khanfarhan10/scikit-learn-mooc
[ "37f1e34eae0304a92f557a8194e068b4333ad418" ]
[ "python_scripts/parameter_tuning_ex_03.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # 📝 Exercise 02\n#\n# The goal is to find the best set of hyperparameters which maximize the\n# statistical performance on a training set.\n#\n# Here again with limit the size of the training set to make computation\n# run faster. Feel free to increase the `train_size` value if your computer\n# is powerful enough.\n\n# %%\nimport numpy as np\nimport pandas as pd\n\nadult_census = pd.read_csv(\"../datasets/adult-census.csv\")\n\ntarget_name = \"class\"\ntarget = adult_census[target_name]\ndata = adult_census.drop(columns=[target_name, \"education-num\"])\n\nfrom sklearn.model_selection import train_test_split\n\ndata_train, data_test, target_train, target_test = train_test_split(\n data, target, random_state=42)\n\n# %% [markdown]\n# Create your machine learning pipeline\n#\n# You should:\n# * preprocess the categorical columns using a `OneHotEncoder` and use a\n# `StandardScaler` to normalize the numerical data.\n# * use a `LogisticRegression` as a predictive model.\n\n# %% [markdown]\n# Start by defining the columns and the preprocessing pipelines to be applied\n# on each columns.\n\n# %%\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\n# %% [markdown]\n# Subsequently, create a `ColumnTransformer` to redirect the specific columns\n# a preprocessing pipeline.\n\n# %%\nfrom sklearn.compose import ColumnTransformer\n\n# %% [markdown]\n# Finally, concatenate the preprocessing pipeline with a logistic regression.\n\n# %%\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\n\n# %% [markdown]\n# Make your random search\n#\n# Use a `RandomizedSearchCV` to find the best set of hyperparameters by tuning\n# the following parameters for the `LogisticRegression` model:\n# - `C` with values ranging from 0.001 to 10. You can use a log-uniform\n# distribution (i.e. `scipy.stats.loguniform`);\n# - `solver` with possible values being `\"liblinear\"` and `\"lbfgs\"`;\n# - `penalty` with possible values being `\"l2\"` and `\"l1\"`;\n#\n# In addition, try several preprocessing strategies with the `OneHotEncoder`\n# by always (or not) dropping the first column when encoding the categorical\n# data.\n#\n# Notes: some combinations of the hyperparameters proposed above are invalid.\n# You can make the parameter search accept such failures by setting\n# `error_score` to `np.nan`. The warning messages give more details on which\n# parameter combinations but the computation will proceed.\n#\n# Once the computation has completed, print the best combination of parameters\n# stored in the `best_params_` attribute.\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hsheth2/vox
[ "4893a46564406c47c7177dd58b28b364bb60f0e2" ]
[ "output_wav/play_files.py" ]
[ "#!usr/bin/env python\n# coding=utf-8\nimport pyaudio\nimport wave\nimport sys\nimport scipy.io.wavfile\nimport scipy.interpolate\nimport numpy as np\nfrom os.path import isfile\n\n# length of data to read.\nchunk = 1024\n\n# create an audio object\np = pyaudio.PyAudio()\nstream = None\nframes = None\nfilename = None\n\ninterpol_frame = 1600\n\n\ndef spec_smooth(frame, frame_previous):\n \"\"\"\n Modified spectral smoothing algorithm with cubic spline interpolation\n \"\"\"\n if frames is None:\n return None\n\n frame_num = [i for i in range(len(frame_previous))] + \\\n [interpol_frame + i + len(frames_previous) for i in range(len(frame))]\n frame_arr = np.concatenate((frame_previous + frame))\n\n cs_interpolation = scipy.interpolate.CubicSpline(x=frame_num, y=frame_arr)\n interpol_val = np.array([cs_interpolation(len(frames_previous) + i) for i in range(interpol_frame)])\n\n return np.concatenate((interpol_val, frame))\n\n\ntry:\n filename_old = filename\n for filename in sys.stdin:\n filename = filename.strip()\n if len(filename) is 0:\n continue\n\n # open the file for reading.\n frames_previous = frames\n wf = wave.open(filename, \"rb\")\n\n # Begin smoothing code\n if filename_old is not None:\n filename_new = \"./output_wav/\" + filename.split('/')[-1][:-4] + \"-\" + \\\n filename_old.split('/')[-1][:-4] + \".wav\"\n\n if not isfile(filename_new):\n rate, frames = scipy.io.wavfile.read(filename)\n add_frames = spec_smooth(frames, frames_previous)\n scipy.io.wavfile.write(filename_new, rate, add_frames)\n wf = wave.open(filename_new, \"rb\")\n\n # End smoothing code\"\"\"\n\n # open stream based on the wave object which has been input.\n if stream is None:\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n # read data (based on the chunk size)\n data = wf.readframes(chunk)\n\n # play stream (looping from beginning of file to the end)\n while data != '':\n # writing to the stream is what *actually* plays the sound.\n stream.write(data)\n data = wf.readframes(chunk)\n\nexcept KeyboardInterrupt:\n pass\nfinally:\n # cleanup stuff.\n if stream is not None:\n stream.close()\n p.terminate()\n print >> sys.stderr, \"Cleanup finished\"\n" ]
[ [ "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hnlab/dgl-lifesci
[ "49e36f46a389a8e3841323ad199bb86f4af0a597" ]
[ "tests/model/test_readout.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nimport dgl\nimport torch\nimport torch.nn.functional as F\n\nfrom dgllife.model.readout import *\n\ndef test_graph1():\n \"\"\"Graph with node features\"\"\"\n g = dgl.graph(([0, 0, 1], [1, 2, 2]))\n return g, torch.arange(g.number_of_nodes()).float().reshape(-1, 1)\n\ndef test_graph2():\n \"Batched graph with node features\"\n g1 = dgl.graph(([0, 0, 1], [1, 2, 2]))\n g2 = dgl.graph(([0, 1, 1, 1], [1, 2, 3, 4]))\n bg = dgl.batch([g1, g2])\n return bg, torch.arange(bg.number_of_nodes()).float().reshape(-1, 1)\n\ndef test_attentive_fp_readout():\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n g, node_feats = test_graph1()\n g, node_feats = g.to(device), node_feats.to(device)\n bg, batch_node_feats = test_graph2()\n bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)\n model = AttentiveFPReadout(feat_size=1,\n num_timesteps=1).to(device)\n assert model(g, node_feats).shape == torch.Size([1, 1])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 1])\n\ndef test_mlp_readout():\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n g, node_feats = test_graph1()\n g, node_feats = g.to(device), node_feats.to(device)\n bg, batch_node_feats = test_graph2()\n bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)\n\n model = MLPNodeReadout(node_feats=1,\n hidden_feats=2,\n graph_feats=3,\n activation=F.relu,\n mode='sum').to(device)\n assert model(g, node_feats).shape == torch.Size([1, 3])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 3])\n\n model = MLPNodeReadout(node_feats=1,\n hidden_feats=2,\n graph_feats=3,\n mode='max').to(device)\n assert model(g, node_feats).shape == torch.Size([1, 3])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 3])\n\n model = MLPNodeReadout(node_feats=1,\n hidden_feats=2,\n graph_feats=3,\n mode='mean').to(device)\n assert model(g, node_feats).shape == torch.Size([1, 3])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 3])\n\ndef test_weave_readout():\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n g, node_feats = test_graph1()\n g, node_feats = g.to(device), node_feats.to(device)\n bg, batch_node_feats = test_graph2()\n bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)\n\n model = WeaveGather(node_in_feats=1).to(device)\n assert model(g, node_feats).shape == torch.Size([1, 1])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 1])\n\n model = WeaveGather(node_in_feats=1, gaussian_expand=False).to(device)\n assert model(g, node_feats).shape == torch.Size([1, 1])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 1])\n\ndef test_weighted_sum_and_max():\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n g, node_feats = test_graph1()\n g, node_feats = g.to(device), node_feats.to(device)\n bg, batch_node_feats = test_graph2()\n bg, batch_node_feats = bg.to(device), batch_node_feats.to(device)\n model = WeightedSumAndMax(in_feats=1).to(device)\n assert model(g, node_feats).shape == torch.Size([1, 2])\n assert model(bg, batch_node_feats).shape == torch.Size([2, 2])\n\nif __name__ == '__main__':\n test_attentive_fp_readout()\n test_mlp_readout()\n test_weave_readout()\n 
test_weighted_sum_and_max()\n" ]
[ [ "torch.device", "torch.Size", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataronio/GLRM
[ "58bdb64f1aafeb06eb0a0fbf74e49138f0a19da6" ]
[ "glrm/loss.py" ]
[ "import cvxpy as cp\nfrom numpy import ones, maximum, minimum, sign, floor, ceil\n\n\"\"\"\nAbstract loss class and canonical loss functions.\n\"\"\"\n\n# Abstract Loss class\nclass Loss(object):\n def __init__(self, A): return\n def loss(self, A, U): raise NotImplementedError(\"Override me!\")\n def encode(self, A): return A # default\n def decode(self, A): return A # default\n def __str__(self): return \"GLRM Loss: override me!\"\n def __call__(self, A, U): return self.loss(A, U)\n\n# Canonical loss functions\nclass QuadraticLoss(Loss):\n def loss(self, A, U): return cp.norm(cp.Constant(A) - U, \"fro\")/2.0\n def __str__(self): return \"quadratic loss\"\n\nclass HuberLoss(Loss):\n a = 1.0 # XXX does the value of 'a' propagate if we update it?\n def loss(self, A, U): return cp.sum_entries(cp.huber(cp.Constant(A) - U, self.a))\n def __str__(self): return \"huber loss\"\n\n# class FractionalLoss(Loss):\n# PRECISION = 1e-10\n# def loss(self, A, U):\n# B = cp.Constant(A)\n# U = cp.max_elemwise(U, self.PRECISION) # to avoid dividing by zero\n# return cp.max_elemwise(cp.mul_elemwise(cp.inv_pos(cp.pos(U)), B-U), \\\n# return maximum((A - U)/U, (U - A)/A)\n# \n\nclass HingeLoss(Loss):\n def loss(self, A, U): return cp.sum_entries(cp.pos(ones(A.shape)-cp.mul_elemwise(cp.Constant(A), U)))\n def decode(self, A): return sign(A) # return back to Boolean\n def __str__(self): return \"hinge loss\"\n\nclass OrdinalLoss(Loss):\n def __init__(self, A):\n self.Amax, self.Amin = A.max(), A.min()\n def loss(self, A, U):\n return cp.sum_entries(sum(cp.mul_elemwise(1*(b >= A),\\\n cp.pos(U-b*ones(A.shape))) + cp.mul_elemwise(1*(b < A), \\\n cp.pos(-U + (b+1)*ones(A.shape))) for b in range(int(self.Amin), int(self.Amax))))\n def decode(self, A): return maximum(minimum(A.round(), self.Amax), self.Amin)\n def __str__(self): return \"ordinal loss\"\n" ]
[ [ "numpy.sign", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
terrafin/iss-rnns
[ "e3cfa64a9fa70ffe22f6c4a53686251617e152da" ]
[ "ptb/ptb_word_lm_heter.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example / benchmark for building a PTB LSTM model.\n\nTrains the model described in:\n(Zaremba, et. al.) Recurrent Neural Network Regularization\nhttp://arxiv.org/abs/1409.2329\n\nThere are 3 supported model configurations:\n===========================================\n| config | epochs | train | valid | test\n===========================================\n| small | 13 | 37.99 | 121.39 | 115.91\n| medium | 39 | 48.45 | 86.16 | 82.07\n| large | 55 | 37.87 | 82.62 | 78.29\nThe exact results may vary depending on the random initialization.\n\nThe hyperparameters used in the model:\n- init_scale - the initial scale of the weights\n- learning_rate - the initial value of the learning rate\n- max_grad_norm - the maximum permissible norm of the gradient\n- num_layers - the number of LSTM layers\n- num_steps - the number of unrolled steps of LSTM\n- hidden_size - the number of LSTM units\n- max_epoch - the number of epochs trained with the initial learning rate\n- max_max_epoch - the total number of epochs for training\n- keep_prob - the probability of keeping weights in the dropout layer\n- lr_decay - the decay of the learning rate for each epoch after \"max_epoch\"\n- batch_size - the batch size\n\nThe data required for this example is in the data/ dir of the\nPTB dataset from Tomas Mikolov's webpage:\n\n$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n$ tar xvf simple-examples.tgz\n\nTo run:\n\n$ python ptb_word_lm.py --data_path=simple-examples/data/\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport time\nimport pylab\nimport json\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.framework import summary_pb2\nfrom time import gmtime, strftime\n\nimport reader\nimport importlib\nimport os.path\nimport matplotlib.pyplot as plt\n\nflags = tf.flags\nzero_threshold = 0.0001\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. 
Possible options are: small, medium, large, sparselarge, validtestlarge.\")\nflags.DEFINE_string(\"data_path\", None,\n \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"restore_path\", None,\n \"Model input directory.\")\nflags.DEFINE_string(\"config_file\", None,\n \"Parameter config file.\")\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\nflags.DEFINE_bool(\"display_weights\", False,\n \"Display weight matrix.\")\nflags.DEFINE_string(\"regularizer\", 'l1_regularizer',\n \"Regularizer type.\")\nflags.DEFINE_string(\"optimizer\", 'gd',\n \"Optimizer of sgd: gd and adam.\")\nflags.DEFINE_string(\"freeze_mode\", None,\n \"How to freeze zero weights.\")\nflags.DEFINE_integer(\"hidden_size1\", 1500,\n \"The hidden size of the first LSTM.\")\nflags.DEFINE_integer(\"hidden_size2\", 1500,\n \"The hidden size of the second LSTM.\")\n\nFLAGS = flags.FLAGS\n\ndef add_dimen_grouplasso(var, axis=0):\n with tf.name_scope(\"DimenGroupLasso\"):\n t = tf.square(var)\n t = tf.reduce_sum(t, axis=axis) + tf.constant(1.0e-8)\n t = tf.sqrt(t)\n reg = tf.reduce_sum(t)\n return reg\n\ndef add_structure_grouplasso(var, coupled_var, couple_split_num=2):\n with tf.name_scope(\"StructureGroupLasso\"):\n with tf.control_dependencies([tf.assert_equal(tf.size(tf.shape(var)), tf.constant(2)),\n tf.assert_equal(tf.size(tf.shape(coupled_var)), tf.constant(2))]):\n\n t1 = tf.square(var)\n t1_col_sum = tf.reduce_sum(t1, axis=0)\n t1_col_sum1, t1_col_sum2, t1_col_sum3, t1_col_sum4 = tf.split(t1_col_sum, 4)\n t1_row_sum = tf.reduce_sum(t1, axis=1)\n _, t1_row_sum2 = tf.split(t1_row_sum, 2)\n\n t2 = tf.square(coupled_var)\n t2_row_sum = tf.reduce_sum(t2, axis=1)\n t2_row_sums = zip(tf.split(t2_row_sum, couple_split_num))\n\n reg_sum = t1_row_sum2 + \\\n t1_col_sum1 + t1_col_sum2 + t1_col_sum3 + t1_col_sum4 + \\\n t2_row_sums[0]+ \\\n tf.constant(1.0e-8)\n reg_sqrt = tf.sqrt(reg_sum)\n reg = tf.reduce_sum(reg_sqrt)\n return reg\n\ndef add_blockwise_grouplasso(t, block_row_size, block_col_size):\n raise NotImplementedError('Not debugged. 
And the implementation is very slow when block is small.')\n with tf.name_scope(\"BlockGroupLasso\"):\n t = tf.expand_dims(tf.expand_dims(t,0),-1)\n blocks = tf.extract_image_patches(t,\n ksizes=[1, block_row_size, block_col_size, 1],\n strides=[1, block_row_size, block_col_size, 1],\n rates=[1, 1, 1, 1],\n padding='VALID')\n reg_sum = tf.constant(0.0)\n zero_blocks = 0.0\n total_blocks = 0.0\n blocks = tf.unstack(blocks) # list of 3-D tensors\n for b in blocks: # for each 3-D tensor\n for bb in tf.unstack(b): # for each 2-D tensor\n for block in tf.unstack(bb): # for each block\n blk_len = tf.sqrt(tf.reduce_sum(tf.square(block))) + tf.constant(1.0e-8)\n reg_sum = reg_sum + tf.cond(blk_len < zero_threshold,\n lambda: tf.constant(0.0),\n lambda: blk_len)\n\n # set them to zeros and calculate sparsity\n #block = tf.assign(block, tf.cond(blk_len < zero_threshold,\n # lambda: tf.zeros_like(block),\n # lambda: block))\n zero_blocks = zero_blocks + tf.cond( tf.equal(tf.reduce_sum(tf.square(block)), 0.0),\n lambda: tf.constant(1.0),\n lambda: tf.constant(0.0))\n total_blocks = total_blocks + 1.0\n return reg_sum, zero_blocks/total_blocks\n\ndef plot_tensor(t,title):\n if len(t.shape)==2:\n print(title)\n col_zero_idx = np.sum(np.abs(t), axis=0) == 0\n row_zero_idx = np.sum(np.abs(t), axis=1) == 0\n col_sparsity = (' column sparsity: %d/%d' % (sum(col_zero_idx), t.shape[1]) )\n row_sparsity = (' row sparsity: %d/%d' % (sum(row_zero_idx), t.shape[0]) )\n\n plt.figure()\n\n t = (t != 0)\n weight_scope = abs(t).max()\n plt.subplot(3, 1, 1)\n plt.imshow(t.reshape((t.shape[0], -1)),\n vmin=-weight_scope,\n vmax=weight_scope,\n cmap=plt.get_cmap('binary'),\n interpolation='none')\n plt.title(title)\n\n col_zero_map = np.tile(col_zero_idx, (t.shape[0], 1))\n row_zero_map = np.tile(row_zero_idx.reshape((t.shape[0], 1)), (1, t.shape[1]))\n zero_map = col_zero_map + row_zero_map\n zero_map_cp = zero_map.copy()\n plt.subplot(3,1,2)\n plt.imshow(zero_map_cp,cmap=plt.get_cmap('gray'),interpolation='none')\n plt.title(col_sparsity + row_sparsity)\n\n if 2*t.shape[0] == t.shape[1]:\n subsize = int(t.shape[0]/2)\n match_map = np.zeros(subsize,dtype=np.int)\n match_map = match_map + row_zero_idx[subsize:2 * subsize]\n for blk in range(0,4):\n match_map = match_map + col_zero_idx[blk*subsize : blk*subsize+subsize]\n match_idx = np.where(match_map == 5)[0]\n zero_map[subsize+match_idx,:] = False\n for blk in range(0, 4):\n zero_map[:,blk*subsize+match_idx] = False\n plt.subplot(3, 1, 3)\n plt.imshow(zero_map, cmap=plt.get_cmap('Reds'), interpolation='none')\n plt.title(' %d/%d matches' % (len(match_idx), sum(row_zero_idx[subsize:subsize*2])))\n else:\n print ('ignoring %s' % title)\n\ndef zerout_gradients_for_zero_weights(grads_and_vars, mode='element'):\n \"\"\" zerout gradients for weights with zero values, so as to freeze zero weights\n Args:\n grads_and_vars: Lists of (gradient, variable).\n mode: the mode to freeze weights.\n 'element': freeze all zero weights\n 'group': freeze rows/columns that are fully zeros\n \"\"\"\n gradients, variables = zip(*grads_and_vars)\n zerout_gradients = []\n for gradient, variable in zip(gradients, variables):\n if gradient is None:\n zerout_gradients.append(None)\n continue\n\n if mode=='element':\n where_cond = tf.less(tf.abs(variable), zero_threshold)\n elif mode=='group':\n raise NotImplementedError('Group wise freezing is not implemented yet.')\n else:\n raise ValueError('Unsupported mode == %s' % mode)\n\n zerout_gradient = tf.where(where_cond,\n tf.zeros_like(gradient),\n 
gradient)\n zerout_gradients.append(zerout_gradient)\n return list(zip(zerout_gradients, variables))\n\n\ndef data_type():\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\nclass PTBInput(object):\n \"\"\"The input data.\"\"\"\n\n def __init__(self, config, data, name=None):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = reader.ptb_producer(\n data, batch_size, num_steps, name=name)\n\n\nclass PTBModel(object):\n \"\"\"The PTB model.\"\"\"\n\n def __init__(self, is_training, config, input_, config_params = None):\n self._input = input_\n self.config_params = config_params\n\n batch_size = input_.batch_size\n num_steps = input_.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n\n # Slightly better results can be obtained with forget gate biases\n # initialized to 1 but the hyperparameters of the model would need to be\n # different than reported in the paper.\n def lstm_cell(size):\n # With the latest TensorFlow source code (as of Mar 27, 2017),\n # the BasicLSTMCell will need a reuse parameter which is unfortunately not\n # defined in TensorFlow 1.0. To maintain backwards compatibility, we add\n # an argument check here:\n if 'reuse' in inspect.getargspec(\n tf.contrib.rnn.BasicLSTMCell.__init__).args:\n return tf.contrib.rnn.BasicLSTMCell(\n size, forget_bias=0.0, state_is_tuple=True,\n reuse=tf.get_variable_scope().reuse)\n else:\n return tf.contrib.rnn.BasicLSTMCell(\n size, forget_bias=0.0, state_is_tuple=True)\n attn_cell = lstm_cell\n if is_training and config.keep_prob < 1:\n def attn_cell(size):\n return tf.contrib.rnn.DropoutWrapper(\n lstm_cell(size), output_keep_prob=config.keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell(\n [attn_cell(FLAGS.hidden_size1), attn_cell(FLAGS.hidden_size2)], state_is_tuple=True)\n\n self._initial_state = cell.zero_state(batch_size, data_type())\n\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n # Simplified version of models/tutorials/rnn/rnn.py's rnn().\n # This builds an unrolled LSTM for tutorial purposes only.\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\n #\n # The alternative version of the code below is:\n #\n # inputs = tf.unstack(inputs, num=num_steps, axis=1)\n # outputs, state = tf.contrib.rnn.static_rnn(\n # cell, inputs, initial_state=self._initial_state)\n outputs = []\n state = self._initial_state\n with tf.variable_scope(\"RNN\"):\n for time_step in range(num_steps):\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n outputs.append(cell_output)\n\n output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, FLAGS.hidden_size2])\n softmax_w = tf.get_variable(\n \"softmax_w\", [FLAGS.hidden_size2, vocab_size], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [logits],\n [tf.reshape(input_.targets, [-1])],\n [tf.ones([batch_size * num_steps], dtype=data_type())])\n\n # L1 regularization\n modname = importlib.import_module('tensorflow.contrib.layers')\n the_regularizer = 
getattr(modname, FLAGS.regularizer)(scale=config_params['weight_decay'], scope=FLAGS.regularizer)\n reg_loss = tf.contrib.layers.apply_regularization(the_regularizer, tf.trainable_variables()[1:])\n self._regularization = reg_loss\n\n sparsity = {}\n\n # Group Lasso regularization\n if config_params:\n glasso_params = config_params.get('grouplasso', None)\n else:\n glasso_params = None\n\n if glasso_params:\n for train_var in tf.trainable_variables():\n var_name = train_var.op.name\n glasso_param = glasso_params.get(var_name,None)\n if glasso_param:\n # column group lasso\n coef = glasso_params['global_decay'] * glasso_param.get('col_decay_multi', 0.0)\n if coef:\n glasso_reg = add_dimen_grouplasso(train_var, axis=0)\n self._regularization = self._regularization + glasso_reg * coef\n # row group lasso\n coef = glasso_params['global_decay']*glasso_param.get('row_decay_multi', 0.0)\n if coef:\n glasso_reg = add_dimen_grouplasso(train_var, axis=1)\n self._regularization = self._regularization + glasso_reg * coef\n # structure lasso\n coef = glasso_params['global_decay'] * glasso_param.get('structure_decay_multi', 0.0)\n if coef:\n # find the coupled layer/var\n coupled_train_var = None\n for _var in tf.trainable_variables():\n if _var.op.name == glasso_param['coupled_layer']:\n coupled_train_var = _var\n break\n couple_split_num = glasso_param.get('couple_split_num', 2)\n glasso_reg = add_structure_grouplasso(train_var, coupled_train_var, couple_split_num=couple_split_num)\n self._regularization = self._regularization + glasso_reg * coef\n\n if config_params['weight_decay'] > 0 or glasso_params:\n # sparsity statistcis\n for train_var in tf.trainable_variables():\n # zerout by small threshold to stablize the sparsity\n sp_name = train_var.op.name\n threshold = max(zero_threshold, 2*config_params['weight_decay'])\n where_cond = tf.less(tf.abs(train_var), threshold)\n train_var = tf.assign(train_var, tf.where(where_cond,\n tf.zeros(tf.shape(train_var)),\n train_var))\n # statistics\n s = tf.nn.zero_fraction(train_var)\n sparsity[sp_name + '_elt_sparsity'] = s\n if glasso_params and glasso_params.get(sp_name,None):\n s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=0))\n sparsity[sp_name + '_col_sparsity'] = s\n s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=1))\n sparsity[sp_name + '_row_sparsity'] = s\n self._sparsity = sparsity\n\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost + self._regularization, tvars),\n config.max_grad_norm)\n\n if 'gd' == FLAGS.optimizer:\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n elif 'adam' == FLAGS.optimizer:\n optimizer = tf.train.AdamOptimizer(self._lr)\n else:\n raise ValueError(\"Wrong optimizer!\")\n\n grads_vars = zip(grads, tvars)\n if FLAGS.freeze_mode:\n grads_vars = zerout_gradients_for_zero_weights(grads_vars, FLAGS.freeze_mode)\n self._train_op = optimizer.apply_gradients(\n grads_vars,\n global_step=tf.contrib.framework.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n @property\n def input(self):\n return self._input\n\n @property\n def initial_state(self):\n return 
self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def regularization(self):\n return self._regularization\n\n @property\n def sparsity(self):\n return self._sparsity\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\nclass SmallConfig(object):\n \"\"\"Small config.\"\"\"\n def __init__(self):\n self.init_scale = 0.1\n self.learning_rate = 1.0\n self.max_grad_norm = 5\n self.num_layers = 2\n self.num_steps = 20\n self.hidden_size = 200\n self.max_epoch = 4\n self.max_max_epoch = 13\n self.keep_prob = 1.0\n self.lr_decay = 0.5\n self.batch_size = 20\n self.vocab_size = 10000\n\n\nclass MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n def __init__(self):\n self.init_scale = 0.05\n self.learning_rate = 1.0\n self.max_grad_norm = 5\n self.num_layers = 2\n self.num_steps = 35\n self.hidden_size = 650\n self.max_epoch = 6\n self.max_max_epoch = 39\n self.keep_prob = 0.5\n self.lr_decay = 0.8\n self.batch_size = 20\n self.vocab_size = 10000\n\n\nclass LargeConfig(object):\n \"\"\"Large config.\"\"\"\n def __init__(self):\n self.init_scale = 0.04\n self.learning_rate = 1.0\n self.max_grad_norm = 10\n self.num_layers = 2\n self.num_steps = 35\n self.hidden_size = 1500\n self.max_epoch = 14\n self.max_max_epoch = 55\n self.keep_prob = 0.35\n self.lr_decay = 1 / 1.15\n self.batch_size = 20\n self.vocab_size = 10000\n\nclass SparseLargeConfig(object):\n \"\"\"Sparse Large config.\"\"\"\n def __init__(self):\n self.init_scale = 0.04\n self.learning_rate = 1.0\n self.max_grad_norm = 10\n self.num_layers = 2\n self.num_steps = 35\n self.hidden_size = 1500\n self.max_epoch = 14\n self.max_max_epoch = 55\n self.keep_prob = 0.60\n self.lr_decay = 0.1\n self.batch_size = 20\n self.vocab_size = 10000\n\nclass ValidTestLargeConfig(object):\n \"\"\"Large config.\"\"\"\n def __init__(self):\n self.init_scale = 0.04\n self.learning_rate = 0.0\n self.max_grad_norm = 10\n self.num_layers = 2\n self.num_steps = 35\n self.hidden_size = 1500\n self.max_epoch = 0\n self.max_max_epoch = 0\n self.keep_prob = 1.0\n self.lr_decay = 1.0\n self.batch_size = 20\n self.vocab_size = 10000\n\nclass TestConfig(object):\n \"\"\"Tiny config, for testing.\"\"\"\n def __init__(self):\n self.init_scale = 0.1\n self.learning_rate = 1.0\n self.max_grad_norm = 1\n self.num_layers = 1\n self.num_steps = 2\n self.hidden_size = 2\n self.max_epoch = 1\n self.max_max_epoch = 1\n self.keep_prob = 1.0\n self.lr_decay = 0.5\n self.batch_size = 20\n self.vocab_size = 10000\n\ndef fetch_sparsity(session, model, eval_op=None, verbose=False):\n outputs = {}\n\n fetches = {\n \"sparsity\": model.sparsity\n }\n\n vals = session.run(fetches)\n sparsity = vals[\"sparsity\"]\n outputs['sparsity'] = sparsity\n return outputs\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n outputs = {}\n regularizations = 0.0\n sparsity = {}\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"regularization\": model.regularization,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = 
vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n regularizations += vals[\"regularization\"]\n sparsity = session.run(model.sparsity)\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f cost: %.4f regularization: %.4f total_cost: %.4f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size,\n np.exp(costs / iters),\n costs / iters,\n regularizations / iters,\n costs / iters + regularizations / iters,\n iters * model.input.batch_size / (time.time() - start_time)))\n\n outputs['perplexity'] = np.exp(costs / iters)\n outputs['cross_entropy'] = costs / iters\n outputs['regularization'] = regularizations / iters\n outputs['total_cost'] = costs / iters + regularizations / iters\n outputs['sparsity'] = sparsity\n return outputs\n\n\ndef get_config():\n if FLAGS.model == \"small\":\n return SmallConfig()\n elif FLAGS.model == \"medium\":\n return MediumConfig()\n elif FLAGS.model == \"large\":\n return LargeConfig()\n elif FLAGS.model == \"sparselarge\":\n return SparseLargeConfig()\n elif FLAGS.model == 'validtestlarge':\n return ValidTestLargeConfig()\n elif FLAGS.model == \"test\":\n return TestConfig()\n else:\n raise ValueError(\"Invalid model: %s\", FLAGS.model)\n\ndef restore_trainables(sess, path):\n if path:\n assert tf.gfile.Exists(path)\n ckpt = tf.train.get_checkpoint_state(path)\n if ckpt and ckpt.model_checkpoint_path:\n variables_to_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n restorer = tf.train.Saver(variables_to_restore)\n if os.path.isabs(ckpt.model_checkpoint_path):\n restorer.restore(sess, ckpt.model_checkpoint_path)\n else:\n restorer.restore(sess, os.path.join(path,\n ckpt.model_checkpoint_path))\n print('Pre-trained model restored from %s' % path)\n else:\n print('Restoring pre-trained model from %s failed!' % path)\n exit()\n\ndef write_scalar_summary(summary_writer, tag, value, step):\n value = summary_pb2.Summary.Value(tag=tag, simple_value=float(value))\n summary = summary_pb2.Summary(value=[value])\n summary_writer.add_summary(summary, step)\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to PTB data directory\")\n if not FLAGS.config_file:\n raise ValueError(\"Must set --config_file to configuration file\")\n else:\n with open(FLAGS.config_file, 'r') as fi:\n config_params = json.load(fi)\n\n # get logger\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger('ptb_rnn')\n logger.setLevel(logging.INFO)\n # saving path\n subfolder_name = strftime(\"%Y-%m-%d___%H-%M-%S\", gmtime())\n config_params['save_path'] = os.path.join(config_params['save_path'], subfolder_name)\n if not os.path.exists(config_params['save_path']):\n os.mkdir(config_params['save_path'])\n else:\n raise IOError('%s exist!' 
% config_params['save_path'])\n\n log_file = os.path.join(config_params['save_path'], 'output.log')\n\n logger.addHandler(logging.FileHandler(log_file))\n logger.info('configurations in file:\\n %s \\n', config_params)\n logger.info('tf.FLAGS:\\n %s \\n', vars(FLAGS))\n\n raw_data = reader.ptb_raw_data(FLAGS.data_path)\n train_data, valid_data, test_data, _ = raw_data\n\n config = get_config()\n config.keep_prob = config_params.get('dropout_keep_prob',config.keep_prob)\n config.learning_rate = config_params.get('learning_rate', config.learning_rate)\n eval_config = get_config()\n eval_config.keep_prob = config_params.get('dropout_keep_prob',eval_config.keep_prob)\n eval_config.learning_rate = config_params.get('learning_rate', eval_config.learning_rate)\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n logger.info('network configurations: \\n %s \\n', vars(config))\n\n with tf.Graph().as_default():\n\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.name_scope(\"Train\"):\n train_input = PTBInput(config=config, data=train_data, name=\"TrainInput\")\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config, input_=train_input, config_params=config_params)\n\n with tf.name_scope(\"Valid\"):\n valid_input = PTBInput(config=config, data=valid_data, name=\"ValidInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config, input_=valid_input, config_params=config_params)\n\n with tf.name_scope(\"Test\"):\n test_input = PTBInput(config=eval_config, data=test_data, name=\"TestInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mtest = PTBModel(is_training=False, config=eval_config,\n input_=test_input, config_params = config_params)\n\n saver = tf.train.Saver(tf.global_variables())\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n config_proto = tf.ConfigProto()\n config_proto.gpu_options.allow_growth = True\n config_proto.log_device_placement = False\n with tf.Session(config=config_proto) as session:\n coord = tf.train.Coordinator()\n session.run(init)\n threads = tf.train.start_queue_runners(sess=session, coord=coord)\n if FLAGS.restore_path:\n restore_trainables(session, FLAGS.restore_path)\n if FLAGS.display_weights:\n outputs = fetch_sparsity(session, mtest)\n print(\"Sparsity: %s\" % outputs['sparsity'])\n for train_var in tf.trainable_variables():\n plot_tensor(train_var.eval(), train_var.op.name)\n plt.show()\n\n outputs = run_epoch(session, mvalid)\n print(\"Restored model with Valid Perplexity: %.3f\" % (outputs['perplexity']))\n\n summary_writer = tf.summary.FileWriter(\n config_params['save_path'],\n graph=tf.get_default_graph())\n\n for i in range(config.max_max_epoch):\n if 'gd' == FLAGS.optimizer:\n if FLAGS.model == \"sparselarge\":\n lr_decay = config.lr_decay ** ( i // (config.max_max_epoch//3) )\n else:\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)\n elif 'adam' == FLAGS.optimizer:\n lr_decay = 1.0\n else:\n raise ValueError(\"Wrong optimizer!\")\n m.assign_lr(session, config.learning_rate * lr_decay)\n write_scalar_summary(summary_writer, 'learning_rate', config.learning_rate * lr_decay, i+1)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n outputs = run_epoch(session, m, eval_op=m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f 
regularization: %.4f \" % (i + 1, outputs['perplexity'], outputs['regularization']))\n write_scalar_summary(summary_writer, 'TrainPerplexity', outputs['perplexity'], i + 1)\n write_scalar_summary(summary_writer, 'cross_entropy', outputs['cross_entropy'], i + 1)\n write_scalar_summary(summary_writer, 'regularization', outputs['regularization'], i + 1)\n write_scalar_summary(summary_writer, 'total_cost', outputs['total_cost'], i + 1)\n for key, value in outputs['sparsity'].items():\n write_scalar_summary(summary_writer, key, value, i + 1)\n\n checkpoint_path = os.path.join(config_params['save_path'], 'model.ckpt')\n saver.save(session, checkpoint_path, global_step=i + 1)\n\n outputs = run_epoch(session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, outputs['perplexity']))\n write_scalar_summary(summary_writer, 'ValidPerplexity', outputs['perplexity'], i + 1)\n\n outputs = run_epoch(session, mtest)\n print(\"Test Perplexity: %.3f\" % outputs['perplexity'])\n write_scalar_summary(summary_writer, 'TestPerplexity', outputs['perplexity'], 0)\n\n coord.request_stop()\n coord.join(threads)\n plt.show()\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.device", "tensorflow.gfile.Exists", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.global_variables", "matplotlib.pyplot.get_cmap", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "numpy.exp", "numpy.where", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.random_uniform_initializer", "tensorflow.get_collection", "tensorflow.extract_image_patches", "tensorflow.gradients", "tensorflow.ConfigProto", "matplotlib.pyplot.subplot", "tensorflow.name_scope", "tensorflow.square", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "numpy.zeros", "matplotlib.pyplot.figure", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.unstack", "matplotlib.pyplot.title", "tensorflow.shape", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.train.GradientDescentOptimizer", "tensorflow.split", "matplotlib.pyplot.show", "tensorflow.nn.embedding_lookup", "tensorflow.train.get_checkpoint_state", "tensorflow.constant", "numpy.abs", "tensorflow.train.start_queue_runners", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.assign", "tensorflow.reshape", "numpy.tile", "tensorflow.expand_dims", "tensorflow.core.framework.summary_pb2.Summary", "tensorflow.nn.zero_fraction", "tensorflow.variable_scope", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.sqrt", "tensorflow.get_variable_scope", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ArushiC/Disaster-Response-Pipelines
[ "e04b199a8b403e47bfdeee76edee8e3776110819" ]
[ "data/process_data.py" ]
[ "import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, how='inner', on='id')\n return df\n\n\ndef clean_data(df):\n categories = df.categories.str.split(';',expand=True)\n row=categories.iloc[0,:]\n category_colnames = row.str.slice(0,-2)\n categories.columns = category_colnames\n \n for column in categories:\n categories[column] = categories[column].str.slice(-1)\n categories[column] = categories[column].astype('int')\n df.drop(['categories'], axis=1, inplace=True)\n df_copy = pd.concat([df, categories], axis=1)\n df_copy = df_copy.drop_duplicates()\n return df\n\n\ndef save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename+'.db')\n df.to_sql(database_filename, engine, index=False)\n pass \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
VU-BEAM-Lab/DNNBeamforming
[ "e8ee8c1e57188a795816b119279ac2e60e5c5236" ]
[ "src/trainer.py" ]
[ "# Copyright 2020 Jaime Tierney, Adam Luchies, and Brett Byram\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the license at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and \n# limitations under the License.\n\nimport torch\n\nimport numpy as np\nimport time\nimport os\n\n\nclass Trainer():\n\n def __init__(self, model, loss, optimizer, loader_train,\n patience=None, scheduler=None,\n loader_train_eval=None, loader_val=None, cuda=None,\n logger=None, data_noise_gaussian=None, save_dir=None):\n \"\"\"\n \"\"\"\n super().__init__()\n self.model = model\n self.loss = loss\n self.optimizer = optimizer\n self.scheduler=scheduler\n self.patience = patience\n self.loader_train = loader_train\n self.loader_train_eval = loader_train_eval\n self.loader_val = loader_val\n self.cuda = cuda\n self.logger = logger\n self.data_noise_gaussian = data_noise_gaussian\n self.save_dir = save_dir\n\n\n\n def train_epoch(self):\n \"\"\" Train model for one epoch\n \"\"\"\n self.model.train()\n\n total_loss = 0\n for batch_idx, data in enumerate(self.loader_train):\n\n # add gaussian noise if enabled\n if self.data_noise_gaussian:\n X = data[0].numpy()\n SNR = np.random.uniform(1, 10**2)\n noise = np.random.randn(*X.shape)\n noise_power = np.sum(np.sum(noise ** 2))\n noise = noise / np.sqrt(noise_power)\n X_power = np.sum(np.sum(X ** 2))\n C = X_power / SNR\n X_noise = X + noise * np.sqrt(C)\n data[0] = torch.from_numpy(np.float32( X_noise) )\n\n inputs = data[0]\n targets = data[1]\n if self.cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n self.loss = self.loss.cuda()\n\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n loss = self.loss(outputs, targets)\n loss.backward()\n self.optimizer.step()\n\n # accumulate loss\n total_loss += loss.data.item()\n\n return total_loss / len(self.loader_train)\n\n\n\n def compute_loss(self, dat_loader):\n \"\"\" Compute model loss for provided data loader\n \"\"\"\n\n self.model.eval()\n\n device = torch.device(\"cuda:0\" if self.cuda else \"cpu\")\n\n total_loss = 0\n for batch_idx, data in enumerate(dat_loader):\n\n # add gaussian noise\n #if self.data_noise_gaussian:\n # X = data[0].numpy()\n # SNR = np.random.uniform(1, 10**2)\n # noise = np.random.randn(*X.shape)\n # noise_power = np.sum(np.sum(noise ** 2))\n # noise = noise / np.sqrt(noise_power)\n # X_power = np.sum(np.sum(X ** 2))\n # C = X_power / SNR\n # X_noise = X + noise * np.sqrt(C)\n # data[0] = torch.from_numpy(np.float32( X_noise) )\n\n inputs = data[0]\n targets = data[1]\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n outputs = self.model(inputs)\n loss = self.loss(outputs, targets)\n\n # accumulate loss\n total_loss += loss.data.item()\n\n return total_loss / len(dat_loader)\n\n\n def train(self):\n \"\"\" Train the model\n \"\"\"\n\n # initial setup\n epoch = 1\n loss_val_best = 100\n num_epochs_increased = 0\n epoch_best = 1\n logs = {}\n\n # Perform training\n while True:\n\n # Run one iteration of SGD\n t0 = time.time()\n loss_train = self.train_epoch()\n loss_train_eval = self.compute_loss(self.loader_train_eval)\n loss_val = 
self.compute_loss(self.loader_val)\n            time_epoch = time.time() - t0\n            self.logger.add_entry( {'loss_train' : loss_train, \n                                    'loss_train_eval' : loss_train_eval,\n                                    'loss_val' : loss_val} )\n\n            # run learning rate scheduler\n            if self.scheduler:\n                self.scheduler.step(loss_val)\n\n            # save logger info\n            if self.save_dir:\n                self.logger.append(os.path.join(self.save_dir, 'log.txt'))\n\n            # change in loss_val\n            d_loss_val = (loss_val-loss_val_best)/loss_val_best * 100\n\n            # display results\n            print('E: {:} / Train: {:.3e} / Valid: {:.3e} / Diff Valid: {:.2f}% / Diff Valid-Train: {:.1f}% / Time: {:.2f}'.format(epoch, loss_train_eval, loss_val, d_loss_val, (loss_val - loss_train_eval)/loss_train_eval*100, time_epoch))\n\n            # if validation loss improves\n            if d_loss_val < 0:\n                num_epochs_increased = 0\n\n                # record epoch and loss\n                epoch_best = epoch\n                loss_val_best = loss_val\n\n                # save model weights\n                if self.save_dir:\n                    print('Validation loss improved. Saving model.')\n                    torch.save(self.model.state_dict(), os.path.join(self.save_dir, 'model.dat'))\n\n            else:\n                num_epochs_increased = num_epochs_increased + 1\n\n            # stop training if we lose patience:\n            if num_epochs_increased > self.patience:\n                break\n\n            # advance epoch counter\n            epoch = epoch + 1\n" ]
[ [ "numpy.sqrt", "numpy.random.randn", "numpy.float32", "torch.device", "numpy.random.uniform", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhern023/ml-models
[ "87e26001e6d7a1c6287ea7a78362957954902a7c" ]
[ "tasks/_tasks_statsmodels.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTasks for building statsmodel pipelines\n\"\"\"\n\nimport _workflow\n\nimport d6tflow\nimport pandas\nimport pathlib\nimport statsmodels.api as sm\n\nclass TaskLoadDataframe(d6tflow.tasks.TaskCachePandas):\n \"\"\"\n Loads in a processed dataframe.\n\n Can handle if the dataframe have been serialized as a pickle\n TODO: Handle JSON\n \"\"\"\n fdata=d6tflow.Parameter()\n\n def run(self):\n fname = pathlib.Path(self.fdata)\n if fname.suffix == '.pkl':\n dataframe = pandas.read_pickle(fname)\n else: # deafult to csv reader\n dataframe = pandas.read_csv(fname)\n self.save(dataframe)\n\[email protected](TaskLoadDataframe)\nclass TaskSetupExogEndogData(d6tflow.tasks.TaskCache):\n \"\"\"\n Will use all non label columns as covariates unless specified otherwise\n Optional: Convert categorical to one-hot for regression\n \"\"\"\n\n add_constant = d6tflow.BoolParameter(default = True)\n convert_to_one_hot = d6tflow.BoolParameter(default=False)\n columns_label = d6tflow.ListParameter()\n columns_covariates = d6tflow.ListParameter(default=None)\n\n persist=['data', 'labels']\n\n def run(self):\n dataframe = self.inputLoad()\n\n exog = dataframe.copy()\n endog = exog.loc[:, list(self.columns_label)]\n exog = exog.drop(columns=list(self.columns_label))\n\n # Uses all covaraites by default.\n if self.columns_covariates:\n exog = exog[list(self.columns_covariates)]\n\n if self.add_constant:\n exog = sm.add_constant(exog) # adds an intercept column\n if self.convert_to_one_hot:\n exog = pandas.get_dummies(exog) # Converts categorical to one-hot\n\n self.save( {'data' : exog, 'labels' : endog } )\n\[email protected]({'inputs' : TaskSetupExogEndogData})\nclass TaskSplitData(d6tflow.tasks.TaskCache):\n split_three_ways = d6tflow.BoolParameter(default = True)\n split_percentage = d6tflow.FloatParameter(default = 0.2)\n\n def run(self):\n data = self.inputLoad(as_dict=True)\n array_data = data['inputs']['data']\n array_labels = data['inputs']['labels']\n\n dict_datasets = {}\n\n if self.split_three_ways:\n dict_datasets['train_data'],\\\n dict_datasets['valid_data'],\\\n dict_datasets['eval_data'] = _workflow.split_three_ways(array_data)\n dict_datasets['train_labels'],\\\n dict_datasets['valid_labels'],\\\n dict_datasets['eval_labels'] = _workflow.split_three_ways(array_labels)\n\n else: # split two ways\n dict_datasets['train_data'],\\\n dict_datasets['eval_data'] = _workflow.split_two_ways(array_data, self.split_percentage)\n dict_datasets['train_labels'],\\\n dict_datasets['eval_labels'] = _workflow.split_two_ways(array_labels, self.split_percentage)\n\n self.save(dict_datasets)\n\n" ]
[ [ "pandas.read_pickle", "pandas.read_csv", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jtpils/MFFNet
[ "ec517c57d7938c566a0900b79b41822ff1a9ab6f" ]
[ "utils/generate_pc_image.py" ]
[ "import copy\nimport numpy as np\nimport open3d as o3d\n\n\ndef load_data_files(data_filename):\n \"\"\"Function that loads data file into memory\n\n :param data_filename: str, the object that represents the original data file name\n :return: data_source: ndarray, the object that stores the data\n \"\"\"\n if data_filename[-3:] == 'txt':\n data_source = np.loadtxt(data_filename)\n return data_source\n elif data_filename[-3:] == 'npy':\n data_source = np.load(data_filename)\n return data_source\n else:\n print('Unknown file type: {}. Please check your input!'.format(data_filename))\n exit()\n\n\ndef main():\n data_source = load_data_files('./../data/stanford_indoor3d' + '/' + 'Area_1_conferenceRoom_1.npy')\n\n data_source[:, 0] = data_source[:, 0]/np.max(data_source[:, 0])\n data_source[:, 1] = data_source[:, 1]/np.max(data_source[:, 1])\n data_source[:, 2] = data_source[:, 2]/np.max(data_source[:, 2])\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(data_source[:, 0:3])\n pcd.colors = o3d.utility.Vector3dVector(data_source[:, 3:6]/255)\n\n o3d.io.write_point_cloud('./test.ply', pcd)\n\n o3d.visualization.draw_geometries([pcd])\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.max", "numpy.load", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WilliamZhai/FantasyBasketballAuctionDraftSimulation
[ "4b4aca065b0f389e885e8109e6d6f5b64efd358d" ]
[ "script.py" ]
[ "import numpy\nimport xlsxwriter\nimport copy\n\n# create file for simulations\nworkbook = xlsxwriter.Workbook(\"data.xlsx\")\n\n# globals\nupperline = workbook.add_format(\n {\n \"top\": 1,\n }\n)\n\nheader_border = workbook.add_format(\n {\n \"bottom\": 1,\n }\n)\n\n# snake draft order\ndef get_draft_order(num_drafter, num_rounds):\n order = []\n direction = 1\n\n i = 0\n while i < num_rounds / num_drafter:\n if direction % 2 == 1:\n for j in range(num_drafter):\n order.append(j)\n else:\n for j in range(num_drafter - 1, -1, -1):\n order.append(j)\n\n direction += 1\n i += 1\n return order\n\n# set auction order\ndef set_auction_order(bidders_valuation_copy):\n item_valuation_tuples = []\n\n for i in range(len(bidders_valuation_copy[0])):\n item_sum = 0\n for j in range(len(bidders_valuation_copy)):\n item_sum += bidders_valuation_copy[j][i]\n item_valuation_tuples.append((item_sum, i))\n\n # order it\n item_valuation_tuples.sort(key=lambda x: x[0])\n\n res = []\n for tup in item_valuation_tuples:\n res.append(tup[1])\n\n return res\n\n# calculate winner for second price auction \ndef get_winner_second_price_auctiion(bids):\n second_highest = 1\n highest = bids[0]\n winner = 0\n\n for i in range(1, len(bids)):\n if bids[i] > highest:\n second_highest = highest\n highest = bids[i]\n winner = i\n elif bids[i] > second_highest:\n second_highest = bids[i]\n \n\n # print(\"bidding round\", bids)\n # print(\" winner\", winner + 1)\n # print(\" price\", second_highest)\n return [winner, second_highest]\n\n# create 1 worksheet for a simluation\ndef create_sheet(sheet_name, num_bidder, num_item, gamma_input=3, gamma_scale=10, uniform_left=0, uniform_right=6):\n \n # =============================================== setup worksheet ===============================================\n sheet = workbook.add_worksheet(sheet_name)\n sheet.set_column(0, 0, 40)\n\n sheet.write(0, 0, \"SIMULATION INPUTS\", header_border)\n for i in range(1, num_item + 1):\n sheet.write(0, i, \"\", header_border)\n \n sheet.write(1, 0, \"number of bidders = \" + str(num_bidder))\n sheet.write(1, 1, \"number of items = \" + str(num_item))\n sheet.write(1, 4, \"common value: gamma distribution Γ = \" + str(gamma_input) + \" and scale = \" + str(gamma_scale))\n sheet.write(1, 10, \"private value: uniform distribution ~ [\" + str(uniform_left) + \",\" + str(uniform_right) + \"]\")\n\n row_start = 3\n\n sheet.write(row_start, 0, \"PLAYER VALUATIONS GENERATED\", header_border)\n for i in range(1, num_item + 1):\n sheet.write(row_start, i, \"\", header_border)\n sheet.write(row_start + 1, 0, \"Items\")\n \n row_start += 2\n # we will generate the common value component of each item with Γ = 3 (default)\n # a smaller gamma will indicate a more positive(right) skewed distribution\n # for examples see https://homepage.divms.uiowa.edu/~mbognar/applets/pois.html \n common_values = []\n for i in range(num_item):\n common_values.append(int(numpy.random.gamma(gamma_input, gamma_scale))) \n \n # for each bidder, we will generate the private value component of each item \n # using a uniform distribution between a and b (default a=0, b=6)\n private_values = [ [] for _ in range(num_bidder) ]\n for i in range(num_bidder):\n for j in range(num_item):\n private_values[i].append(int(numpy.random.uniform(uniform_left, uniform_right)))\n\n # write to workbook the common values \n sheet.write(row_start, 0, \"Common Values\")\n for x in range(num_item):\n sheet.write(row_start - 1, x + 1, \"item\" + str(x+1))\n sheet.write(row_start, x + 1, 
common_values[x])\n\n row_start = row_start + 2\n # write to workbook the private values \n for y in range(num_bidder):\n sheet.write(row_start + y, 0, \"bidder\" + str(y + 1) + \" private values\")\n for x in range(num_item):\n sheet.write(row_start + y, x + 1, private_values[y][x])\n \n # write the overall valuation of bidders and items\n row_start += num_bidder + 1\n\n for y in range(num_bidder):\n sheet.write(row_start + y, 0, \"bidder\" + str(y + 1) + \" total valuation\")\n\n for x in range(num_item):\n sheet.write(row_start + y, x + 1, private_values[y][x] + common_values[x])\n \n row_start += num_bidder\n\n # =============================================== a snake draft fomat ===============================================\n row_start = row_start + 1\n\n sheet.write(row_start, 0, \"SNAKE DRAFT RESULT\", header_border)\n for i in range(1, num_item + 1):\n sheet.write(row_start, i, \"\", header_border)\n\n # setup\n bidder_items = [ [] for _ in range(num_bidder) ]\n bidders_valuation_copy = copy.deepcopy(private_values)\n for y in range(num_bidder):\n for x in range(num_item):\n bidders_valuation_copy[y][x] = bidders_valuation_copy[y][x] + common_values[x]\n\n # compute each round\n row_start = row_start + 2\n drafter_order = get_draft_order(num_bidder, num_item)\n for bidder_index in (drafter_order):\n\n item = max(bidders_valuation_copy[bidder_index]) # highest value item for bidder i\n index = bidders_valuation_copy[bidder_index].index(item) # index of that item\n\n bidder_items[bidder_index].append(item) # add item to bidder i's list of items\n \n # remove that item from list of available items\n for j in range(num_bidder):\n bidders_valuation_copy[j].pop(index)\n \n # write to sheet\n for x in range(len(bidder_items[y])):\n sheet.write(row_start - 1, x + 1, \"pick\" + str(x + 1))\n sheet.write(row_start - 1, len(bidder_items[y]) + 1, \"total utility\")\n\n sum_total = 0\n for y in range(num_bidder):\n total = 0\n sheet.write(row_start + y, 0, \"bidder\" + str(y + 1) + \" item values: \")\n for x in range(len(bidder_items[y])):\n sheet.write(row_start + y, x + 1, bidder_items[y][x])\n total += bidder_items[y][x]\n sheet.write(row_start + y, x + 2, total)\n sum_total += total\n \n row_start += num_bidder\n sheet.write(row_start, x + 2, sum_total, upperline)\n\n\n # =============================================== second price budget auction ===============================================\n row_start += 2\n # generate fair budget from avg utility in snake draft\n budget = int((sum_total / 4) * 1.1)\n\n sheet.write(row_start, 0, \"SECOND PRICE BUDGET AUCTION RESULT\", header_border)\n sheet.write(row_start, 1, \"budget=\" + str(budget), header_border)\n for i in range(2, num_item + 1):\n sheet.write(row_start, i, \"\", header_border)\n \n # setup\n bidder_items = [ [] for _ in range(num_bidder) ]\n budgets = [ budget for _ in range(num_bidder) ]\n\n bidders_valuation_copy = copy.deepcopy(private_values)\n for y in range(num_bidder):\n for x in range(num_item):\n bidders_valuation_copy[y][x] = bidders_valuation_copy[y][x] + common_values[x]\n\n # set the auction order for items based on overall valuations\n auction_order = set_auction_order(bidders_valuation_copy)\n # print(auction_order)\n\n # compute each round\n row_start = row_start + 2\n\n for i in range(num_item):\n index = auction_order[i]\n bids = []\n for j in range(num_bidder):\n if len(bidder_items[j]) < (num_item / num_bidder):\n bid = min(bidders_valuation_copy[j][index], budgets[j]) # bids his valuation or remaining 
budget\n bids.append(bid)\n else:\n bid = 0\n bids.append(bid)\n\n # compute round\n [winner_index, price] = get_winner_second_price_auctiion(bids)\n\n # give item to the winner\n bidder_items[winner_index].append(bidders_valuation_copy[winner_index][index])\n\n # reduce winner's budget by the second highest bid\n budgets[winner_index] -= price\n\n # write to sheet\n for x in range(len(bidder_items[y])):\n sheet.write(row_start - 1, x + 1, \"item\" + str(x + 1))\n sheet.write(row_start - 1, len(bidder_items[y]) + 1, \"total utility\")\n\n sum_total = 0\n for y in range(num_bidder):\n total = 0\n sheet.write(row_start + y, 0, \"bidder\" + str(y + 1) + \" item values: \")\n for x in range(len(bidder_items[y])):\n sheet.write(row_start + y, x + 1, bidder_items[y][x])\n total += bidder_items[y][x]\n sheet.write(row_start + y, x + 2, total)\n sum_total += total\n \n row_start += num_bidder\n sheet.write(row_start, x + 2, sum_total, upperline)\n\n # =============================================== first price budget auction ===============================================\n row_start += 2\n # generate fair budget from avg utility in snake draft\n\n sheet.write(row_start, 0, \"FIRST PRICE BUDGET AUCTION RESULT\", header_border)\n sheet.write(row_start, 1, \"budget=\" + str(budget), header_border)\n for i in range(2, num_item + 1):\n sheet.write(row_start, i, \"\", header_border)\n \n # setup\n bidder_items = [ [] for _ in range(num_bidder) ]\n budgets = [ budget for _ in range(num_bidder) ]\n\n bidders_valuation_copy = copy.deepcopy(private_values)\n for y in range(num_bidder):\n for x in range(num_item):\n bidders_valuation_copy[y][x] = bidders_valuation_copy[y][x] + common_values[x]\n\n # set the auction order for items based on overall valuations\n auction_order = set_auction_order(bidders_valuation_copy)\n\n # compute each round\n row_start = row_start + 2\n\n for i in range(num_item):\n index = auction_order[i]\n bids = []\n for j in range(num_bidder):\n if len(bidder_items[j]) < (num_item / num_bidder):\n bid_from_strategy = int(bidders_valuation_copy[j][index] * num_bidder / (num_bidder - 1))\n bid = min(bid_from_strategy , budgets[j]) # bids his valuation or remaining budget\n bids.append(bid)\n else:\n bid = 0\n bids.append(bid)\n\n # compute round\n [winner_index, price] = get_winner_second_price_auctiion(bids)\n\n # give item to the winner\n bidder_items[winner_index].append(bidders_valuation_copy[winner_index][index])\n\n # reduce winner's budget by the second highest bid\n budgets[winner_index] -= price\n\n # write to sheet\n for x in range(len(bidder_items[y])):\n sheet.write(row_start - 1, x + 1, \"item\" + str(x + 1))\n sheet.write(row_start - 1, len(bidder_items[y]) + 1, \"total utility\")\n\n sum_total = 0\n for y in range(num_bidder):\n total = 0\n sheet.write(row_start + y, 0, \"bidder\" + str(y + 1) + \" item values: \")\n for x in range(len(bidder_items[y])):\n sheet.write(row_start + y, x + 1, bidder_items[y][x])\n total += bidder_items[y][x]\n sheet.write(row_start + y, x + 2, total)\n sum_total += total\n \n row_start += num_bidder\n sheet.write(row_start, x + 2, sum_total, upperline)\n\n\n# SET INPUT HERE\n\nfor i in range(10):\n create_sheet(\n sheet_name=\"sim\" + str(i + 1), \n num_bidder=2, \n num_item=10, \n gamma_input=3, \n gamma_scale=10, \n uniform_left=0, \n uniform_right=10)\n\nfor i in range(10, 20):\n create_sheet(\n sheet_name=\"sim\" + str(i + 1), \n num_bidder=4, \n num_item=24, \n gamma_input=3, \n gamma_scale=10, \n uniform_left=0, \n 
uniform_right=10)\n\nfor i in range(20, 30):\n create_sheet(\n sheet_name=\"sim\" + str(i + 1), \n num_bidder=8, \n num_item=64, \n gamma_input=3, \n gamma_scale=10, \n uniform_left=0, \n uniform_right=10)\n\nworkbook.close()" ]
[ [ "numpy.random.uniform", "numpy.random.gamma" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zhaopudark/GANs-TF2-MinxedPrecision
[ "2fc2f027254e9d61beca6342a1f63854e6f96393" ]
[ "implementations/DCGAN-MNIST-WGP-BN/model.py" ]
[ "import networks \nimport tensorflow as tf\nimport time\nimport os \nimport sys\nbase = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(base,'../../'))\nfrom basemodels.GanLosses import GanLoss\nfrom basemodels.GanOptimizers import Adam\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\nimport numpy as np\nfrom PIL import Image\nimport datetime\n###############global paraments###################\n\"\"\"\n记录那些无法被模型定义传递的参数\n尤其是@tf.function() 中需要的参数\n学习率与损失函数系数则应当在模型训练过程中予以控制\n\"\"\"\nglobal_input_X_shape = [None,100]\nglobal_input_Y_shape = [None,28,28,1]\n################################################\nclass DCGAN(tf.keras.Model):\n \"\"\"\n 模型只负责给定训练集和测试(验证)集后的操作\n \"\"\"\n def __init__(self,\n train_set,\n test_set,\n loss_name=\"Vanilla\",\n mixed_precision=False,\n learning_rate=2e-4,\n tmp_path=None,\n out_path=None):\n super(DCGAN,self).__init__()\n #接收数据集和相关参数\n self.train_set = train_set\n self.test_set = test_set\n self.tmp_path = tmp_path\n self.out_path = out_path\n #定义模型\n self.G = networks.Generator(name=\"G\")\n if loss_name in [\"WGAN-SN\",\"WGAN-GP-SN\"]:\n self.D = networks.Discriminator(name=\"If_is_real\",use_sigmoid=False,sn=True)\n self.loss_name = loss_name[:-3]\n elif loss_name in [\"WGAN\",\"WGAN-GP\"]:\n self.D = networks.Discriminator(name=\"If_is_real\",use_sigmoid=False,sn=False)\n self.loss_name = loss_name\n elif loss_name in [\"Vanilla\",\"LSGAN\"]:\n self.D = networks.Discriminator(name=\"If_is_real\",use_sigmoid=True,sn=False)\n self.loss_name = loss_name\n else: \n raise ValueError(\"Do not support the loss \"+loss_name)\n\n self.model_list=[self.G,self.D]\n #定义损失函数 优化器 记录等\n self.gan_loss = GanLoss(self.loss_name)\n self.optimizers_list = self.optimizers_config(mixed_precision=mixed_precision,learning_rate=learning_rate)\n self.mixed_precision = mixed_precision\n self.matrics_list = self.matrics_config()\n self.checkpoint_config()\n self.get_seed()\n def build(self,input_shape_G,input_shape_D):\n \"\"\"\n input_shape必须切片 因为在底层会被当做各层的输出shape而被改动\n \"\"\"\n self.G.build(input_shape=input_shape_G[:])#G X->Y\n self.D.build(input_shape=input_shape_D[:])#D Y or not Y\n self.built = True\n def optimizers_config(self,mixed_precision=False,learning_rate=2e-4):\n self.G_optimizer = Adam(learning_rate=1e-4,beta_1=0.0,beta_2=0.9)\n self.D_optimizer = Adam(learning_rate=1e-4,beta_1=0.0,beta_2=0.9)\n if mixed_precision:\n self.G_optimizer=self.G_optimizer.get_mixed_precision()\n self.D_optimizer=self.D_optimizer.get_mixed_precision()\n return [self.G_optimizer,self.D_optimizer]\n def matrics_config(self):\n current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n train_logdir = self.tmp_path+\"/logs/\" + current_time\n self.train_summary_writer = tf.summary.create_file_writer(train_logdir)\n return []\n # return None\n def checkpoint_config(self):\n self.ckpt = tf.train.Checkpoint(step=tf.Variable(1),optimizer=self.optimizers_list,model=self.model_list,dataset=self.train_set)\n self.manager = tf.train.CheckpointManager(self.ckpt,self.tmp_path+'/tf_ckpts', max_to_keep=3)\n def pix_gradient(self,x):\n x = tf.reshape(x,shape=[1,64,64,1])#在各batch和通道上进行像素梯度 对2D单通道而言其实没必要reshape\n dx,dy = tf.image.image_gradients(x)\n return dx,dy\n\n @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=[4],dtype=tf.int32),\\\n tf.TensorSpec(shape=[1],dtype=tf.uint32)])\n def 
train_step_D(self,trainX,trainY,y_shape,step):\n with tf.GradientTape(persistent=True) as D_tape:\n GeneratedY = self.G(trainX)\n D_real_out = self.D(trainY)\n D_fake_out = self.D(GeneratedY)\n\n e = tf.random.uniform(shape=y_shape,minval=0.0,maxval=1.0)\n mid_Y = e*trainY+(1-e)*GeneratedY\n with tf.GradientTape() as GP:\n GP.watch(mid_Y)\n inner_loss = self.D(mid_Y)\n penalty = GP.gradient(inner_loss,mid_Y)\n # print(\"penalty\",penalty.shape)\n # penalty_norm = 10.0*tf.math.square(tf.maximum(tf.norm(penalty,ord='euclidean'),1.0)-1) #\n penalty_norm = 10.0*tf.math.square(tf.norm(tf.reshape(penalty,[y_shape[0],-1]),axis=-1,ord=2)-1)#这是按照算法愿意\n # print(\"penalty_norm\",penalty_norm.shape)\n D_loss = self.gan_loss.DiscriminatorLoss(D_real_out,D_fake_out)+tf.reduce_mean(penalty_norm)\n\n if self.mixed_precision:\n scaled_D_loss = self.D_optimizer.get_scaled_loss(D_loss)\n if self.mixed_precision:\n scaled_gradients_of_D=D_tape.gradient(scaled_D_loss,self.D.trainable_variables)\n gradients_of_D = self.D_optimizer.get_unscaled_gradients(scaled_gradients_of_D)\n else:\n gradients_of_D = D_tape.gradient(D_loss,self.D.trainable_variables)\n \n self.D_optimizer.apply_gradients(zip(gradients_of_D,self.D.trainable_variables))\n\n return D_loss\n\n \n @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=[4],dtype=tf.int32),\\\n tf.TensorSpec(shape=[1],dtype=tf.uint32)])\n def train_step_G(self,trainX,trainY,y_shape,step):\n with tf.GradientTape(persistent=True) as G_tape:\n GeneratedY = self.G(trainX)\n # Dy_real_out = self.Dy(trainY)\n D_fake_out = self.D(GeneratedY)\n\n G_loss = self.gan_loss.GeneratorLoss(D_fake_out)\n\n if self.mixed_precision:\n scaled_G_loss = self.G_optimizer.get_scaled_loss(G_loss)\n if self.mixed_precision:\n scaled_gradients_of_G=G_tape.gradient(scaled_G_loss,self.G.trainable_variables)\n gradients_of_G = self.G_optimizer.get_unscaled_gradients(scaled_gradients_of_G)\n else:\n gradients_of_G = G_tape.gradient(G_loss,self.G.trainable_variables)\n\n self.G_optimizer.apply_gradients(zip(gradients_of_G,self.G.trainable_variables))\n return G_loss\n \n\n @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),\\\n tf.TensorSpec(shape=[4],dtype=tf.int32),\\\n tf.TensorSpec(shape=[1],dtype=tf.uint32)])\n def train_step(self,trainX,trainY,y_shape,step):\n with tf.GradientTape(persistent=True) as gan_type:\n GeneratedY = self.G(trainX)\n D_real_out = self.D(trainY)\n D_fake_out = self.D(GeneratedY)\n\n D_loss = self.gan_loss.DiscriminatorLoss(D_real_out,D_fake_out)\n G_loss = self.gan_loss.GeneratorLoss(D_fake_out)\n\n if self.mixed_precision:\n scaled_D_loss = self.D_optimizer.get_scaled_loss(D_loss)\n scaled_G_loss = self.G_optimizer.get_scaled_loss(G_loss)\n\n if self.mixed_precision:\n scaled_gradients_of_D=gan_type.gradient(scaled_D_loss,self.D.trainable_variables)\n scaled_gradients_of_G=gan_type.gradient(scaled_G_loss,self.G.trainable_variables)\n gradients_of_D = self.D_optimizer.get_unscaled_gradients(scaled_gradients_of_D)\n gradients_of_G = self.G_optimizer.get_unscaled_gradients(scaled_gradients_of_G)\n else:\n gradients_of_D = gan_type.gradient(D_loss,self.D.trainable_variables)\n gradients_of_G = gan_type.gradient(G_loss,self.G.trainable_variables)\n \n self.D_optimizer.apply_gradients(zip(gradients_of_D,self.D.trainable_variables))\n 
self.G_optimizer.apply_gradients(zip(gradients_of_G,self.G.trainable_variables))\n return D_loss,G_loss\n\n def train(self,epoches):\n self.ckpt.restore(self.manager.latest_checkpoint)\n for _ in range(epoches):\n start = time.time()\n for trainX,trainY in self.train_set:\n self.ckpt.step.assign_add(1)\n step = int(self.ckpt.step)\n if self.loss_name in [\"WGAN\",\"WGAN-GP\"]:\n for __ in range(5):\n D_loss = self.train_step_D(trainX,trainY,\n tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),\n tf.constant(step,shape=[1],dtype=tf.uint32))\n for __ in range(1):\n G_loss = self.train_step_G(trainX,trainY,\n tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),\n tf.constant(step,shape=[1],dtype=tf.uint32))\n elif self.loss_name in [\"Vanilla\",\"LSGAN\"]:\n D_loss,G_loss = self.train_step(trainX,trainY,\n tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),\n tf.constant(step,shape=[1],dtype=tf.uint32))\n else:\n raise ValueError(\"Inner Error\")\n \n if step % 100 == 0:\n save_path = self.manager.save()\n print(\"Saved checkpoint for step {}: {}\".format(step,save_path))\n self.G.save_weights(self.tmp_path+'/weights_saved/G.ckpt')\n self.D.save_weights(self.tmp_path+'/weights_saved/D.ckpt')\n self.wirte_summary(step=step,\n seed=self.seed,\n G=self.G,\n G_loss=G_loss,\n D_loss=D_loss,\n out_path=self.out_path)\n print ('Time to next 100 step {} is {} sec'.format(step,time.time()-start))\n start = time.time()\n def test(self,take_nums):\n out_path = self.out_path+\"/test\"\n import os\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n self.ckpt.restore(self.manager.latest_checkpoint)\n seed_get = iter(self.test_set)\n for take in range(take_nums):\n plt.figure(figsize=(10,10))#图片大一点才可以承载像素\n for i in range(100):\n single_seed = next(seed_get)\n GeneratedY = self.G(single_seed,training=False)\n plt.subplot(10,10,(i+1))\n plt.imshow(GeneratedY[0,:,:,0],cmap='gray')\n plt.axis('off')\n plt.savefig(out_path+'/image_at_{}.png'.format(take))\n plt.close()\n\n def get_seed(self):\n self.seed = []\n seed_get = iter(self.test_set)\n for _ in range(100):\n seed = next(seed_get)\n self.seed.append(seed) \n\n def wirte_summary(self,step,seed,G,G_loss,D_loss,out_path):\n plt.figure(figsize=(10,10))#图片大一点才可以承载像素\n for i,single_seed in enumerate(seed):\n GeneratedY = G(single_seed,training=False)\n plt.subplot(10,10,(i+1))\n plt.imshow(GeneratedY[0,:,:,0],cmap='gray')\n plt.axis('off')\n plt.savefig(out_path+'/image_at_{}.png'.format(step))\n plt.close()\n img = Image.open(out_path+'/image_at_{}.png'.format(step))\n img = tf.reshape(np.array(img),shape=(1,1000,1000,4))\n\n with self.train_summary_writer.as_default():\n ##########################\n tf.summary.scalar('G_loss',G_loss,step=step)\n tf.summary.scalar('D_loss',D_loss,step=step)\n tf.summary.image(\"img\",img,step=step)\n\nif __name__ == \"__main__\":\n y = tf.constant([128,1,1,1],shape=[4],dtype=tf.int32)\n print(list(y.numpy()))" ]
[ [ "matplotlib.pyplot.imshow", "tensorflow.summary.scalar", "tensorflow.Variable", "tensorflow.summary.image", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "tensorflow.train.CheckpointManager", "matplotlib.pyplot.switch_backend", "tensorflow.random.uniform", "numpy.array", "tensorflow.GradientTape", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.image.image_gradients", "tensorflow.TensorSpec", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pedrodiamel/kaggle-imaterialist
[ "8e9dfbc7f09eb8f93b44d7c4c5518911d16176ac" ]
[ "torchlib/classifierneuralnet.py" ]
[ "\n# STD MODULES \nimport os\nimport math\nimport shutil\nimport time\nimport numpy as np\nfrom tqdm import tqdm\n\n# TORCH MODULE\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# PYTVISION MODULE\nfrom pytvision.neuralnet import NeuralNetAbstract\nfrom pytvision.logger import Logger, AverageFilterMeter, AverageMeter\nfrom pytvision import utils as pytutils\nfrom pytvision import graphic as gph\nfrom pytvision import netlearningrate\n\n# LOCAL MODULE\nfrom . import netmodels as nnmodels\nfrom . import netlosses as nloss\n\n\nclass NeuralNetClassifier(NeuralNetAbstract):\n \"\"\"\n Convolutional Neural Net \n \"\"\"\n\n def __init__(self,\n patchproject,\n nameproject,\n no_cuda=True,\n parallel=False,\n seed=1,\n print_freq=10,\n gpu=0\n ):\n \"\"\"\n Initialization\n Args:\n @patchproject (str): path project\n @nameproject (str): name project\n @no_cuda (bool): system cuda (default is True)\n @parallel (bool)\n @seed (int)\n @print_freq (int)\n @gpu (int)\n \"\"\"\n\n super(NeuralNetClassifier, self).__init__( patchproject, nameproject, no_cuda, parallel, seed, print_freq, gpu )\n\n \n def create(self, \n arch, \n num_output_channels, \n num_input_channels, \n loss, \n lr, \n momentum, \n optimizer, \n lrsch, \n pretrained=False,\n topk=(1,),\n ):\n \"\"\"\n Create\n Args:\n @arch (string): architecture\n @num_output_channels, \n @num_input_channels, \n @loss (string):\n @lr (float): learning rate\n @momentum,\n @optimizer (string) : \n @lrsch (string): scheduler learning rate\n @pretrained (bool)\n \"\"\"\n super(NeuralNetClassifier, self).create( arch, num_output_channels, num_input_channels, loss, lr, momentum, optimizer, lrsch, pretrained)\n self.accuracy = nloss.Accuracy( topk )\n self.cnf = nloss.ConfusionMeter( self.num_output_channels, normalized=True )\n self.visheatmap = gph.HeatMapVisdom( env_name=self.nameproject )\n\n # Set the graphic visualization\n self.metrics_name = [ 'top{}'.format(k) for k in topk ]\n self.logger_train = Logger( 'Trn', ['loss'], self.metrics_name, self.plotter )\n self.logger_val = Logger( 'Val', ['loss'], self.metrics_name, self.plotter )\n \n\n \n def training(self, data_loader, epoch=0):\n\n self.logger_train.reset()\n data_time = AverageMeter()\n batch_time = AverageMeter()\n\n # switch to evaluate mode\n self.net.train()\n\n end = time.time()\n for i, sample in enumerate(data_loader):\n \n # measure data loading time\n data_time.update(time.time() - end)\n # get data (image, label)\n inputs, targets = sample['image'], pytutils.argmax(sample['label'])\n batch_size = inputs.size(0)\n\n if self.cuda:\n targets = targets.cuda( non_blocking=True )\n inputs_var = Variable(inputs.cuda(), requires_grad=False)\n targets_var = Variable(targets.cuda(), requires_grad=False)\n else:\n inputs_var = Variable(inputs, requires_grad=False)\n targets_var = Variable(targets, requires_grad=False)\n\n # fit (forward)\n outputs = self.net(inputs_var)\n\n # measure accuracy and record loss\n loss = self.criterion(outputs, targets_var) \n pred = self.accuracy(outputs.data, targets )\n \n # optimizer\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # update\n self.logger_train.update(\n {'loss': loss.data[0] },\n dict(zip(self.metrics_name, [pred[p][0] for p in range(len(self.metrics_name)) ])), \n batch_size,\n )\n \n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % self.print_freq == 0: \n self.logger_train.logger( epoch, epoch + 
float(i+1)/len(data_loader), i, len(data_loader), batch_time, )\n \n\n def evaluate(self, data_loader, epoch=0):\n \n self.logger_val.reset()\n self.cnf.reset()\n batch_time = AverageMeter()\n \n\n # switch to evaluate mode\n self.net.eval()\n with torch.no_grad():\n end = time.time()\n for i, sample in enumerate(data_loader):\n \n # get data (image, label)\n inputs, targets = sample['image'], pytutils.argmax(sample['label'])\n batch_size = inputs.size(0)\n\n if self.cuda:\n targets = targets.cuda( non_blocking=True )\n inputs_var = Variable(inputs.cuda(), requires_grad=False, volatile=True)\n targets_var = Variable(targets.cuda(), requires_grad=False, volatile=True)\n else:\n inputs_var = Variable(inputs, requires_grad=False, volatile=True)\n targets_var = Variable(targets, requires_grad=False, volatile=True)\n \n # fit (forward)\n outputs = self.net(inputs_var)\n\n # measure accuracy and record loss\n loss = self.criterion(outputs, targets_var) \n pred = self.accuracy(outputs.data, targets )\n self.cnf.add( outputs.argmax(1) , targets_var ) \n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # update\n self.logger_val.update(\n {'loss': loss.data[0] },\n dict(zip(self.metrics_name, [pred[p][0] for p in range(len(self.metrics_name)) ])), \n batch_size,\n )\n\n if i % self.print_freq == 0:\n self.logger_val.logger(\n epoch, epoch, i,len(data_loader), \n batch_time, \n bplotter=False,\n bavg=True, \n bsummary=False,\n )\n\n #save validation loss\n self.vallosses = self.logger_val.info['loss']['loss'].avg\n acc = self.logger_val.info['metrics']['top1'].avg\n\n self.logger_val.logger(\n epoch, epoch, i, len(data_loader), \n batch_time,\n bplotter=True,\n bavg=True, \n bsummary=True,\n )\n \n print('Confusion Matriz')\n print(self.cnf.value(), flush=True)\n print('\\n')\n \n self.visheatmap.show('Confusion Matriz', self.cnf.value()) \n return acc\n \n #def _to_end_epoch(self, epoch, epochs, train_loader, val_loader):\n #print('>> Reset', flush=True )\n #w = 1-self.cnf.value().diagonal() \n #train_loader.dataset.reset(w)\n \n\n def test(self, data_loader):\n \n n = len(data_loader)*data_loader.batch_size\n Yhat = np.zeros((n, self.num_output_channels ))\n Y = np.zeros((n,1) )\n k=0\n\n # switch to evaluate mode\n self.net.eval()\n with torch.no_grad():\n end = time.time()\n for i, sample in enumerate( tqdm(data_loader) ):\n \n # get data (image, label)\n inputs = sample['image'] \n targets = pytutils.argmax(sample['label']) \n \n x = inputs.cuda() if self.cuda else inputs \n x = Variable(x, requires_grad=False, volatile=True )\n \n # fit (forward)\n yhat = self.net(x)\n yhat = F.softmax(yhat, dim=1) \n yhat = pytutils.to_np(yhat)\n \n for j in range(yhat.shape[0]):\n Y[k] = targets[j]\n Yhat[k,:] = yhat[j] \n k+=1 \n\n #print( 'Test:', i , flush=True )\n\n Yhat = Yhat[:k,:]\n Y = Y[:k]\n \n return Yhat, Y\n \n def predict(self, data_loader):\n \n n = len(data_loader)*data_loader.batch_size\n Yhat = np.zeros((n, self.num_output_channels ))\n Ids = np.zeros((n,1) )\n k=0\n\n # switch to evaluate mode\n self.net.eval()\n with torch.no_grad():\n end = time.time()\n for i, (Id, inputs) in enumerate( tqdm(data_loader) ):\n \n # get data (image, label)\n #inputs = sample['image'] \n #Id = sample['id']\n \n x = inputs.cuda() if self.cuda else inputs \n x = Variable(x, requires_grad=False, volatile=True )\n \n # fit (forward)\n yhat = self.net(x)\n yhat = F.softmax(yhat, dim=1) \n yhat = pytutils.to_np(yhat)\n \n for j in range(yhat.shape[0]):\n Yhat[k,:] = 
yhat[j]\n Ids[k] = Id[j] \n k+=1 \n\n Yhat = Yhat[:k,:]\n Ids = Ids[:k]\n \n return Ids, Yhat\n \n def __call__(self, image): \n \n # switch to evaluate mode\n self.net.eval()\n with torch.no_grad():\n x = image.cuda() if self.cuda else image \n x = Variable(x, requires_grad=False, volatile=True )\n msoft = nn.Softmax()\n yhat = msoft(self.net(x))\n yhat = pytutils.to_np(yhat)\n\n return yhat\n\n\n def representation(self, data_loader):\n \"\"\"\"\n Representation\n -data_loader: simple data loader for image\n \"\"\"\n \n # switch to evaluate mode\n self.net.eval()\n\n n = len(data_loader)*data_loader.batch_size\n k=0\n\n # embebed features \n embX = np.zeros([n,self.net.dim])\n embY = np.zeros([n,1])\n\n batch_time = AverageMeter()\n end = time.time()\n for i, sample in enumerate(data_loader):\n \n # get data (image, label)\n inputs, targets = sample['image'], pytutils.argmax(sample['label'])\n inputs_var = pytutils.to_var(inputs, self.cuda, False, True )\n\n # representation\n emb = self.net.representation(inputs_var)\n emb = pytutils.to_np(emb)\n for j in range(emb.shape[0]):\n embX[k,:] = emb[j,:]\n embY[k] = targets[j]\n k+=1\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n print('Representation: |{:06d}/{:06d}||{batch_time.val:.3f} ({batch_time.avg:.3f})|'.format(i,len(data_loader), batch_time=batch_time) )\n\n\n embX = embX[:k,:]\n embY = embY[:k]\n\n return embX, embY\n \n def _create_model(self, arch, num_output_channels, num_input_channels, pretrained):\n \"\"\"\n Create model\n @arch (string): select architecture\n @num_classes (int)\n @num_channels (int)\n @pretrained (bool)\n \"\"\" \n\n self.net = None\n self.size_input = 0 \n \n kw = {'num_classes': num_output_channels, 'num_channels': num_input_channels, 'pretrained': pretrained}\n self.net = nnmodels.__dict__[arch](**kw)\n \n self.s_arch = arch\n self.size_input = self.net.size_input \n self.num_output_channels = num_output_channels\n self.num_input_channels = num_input_channels\n\n if self.cuda == True:\n self.net.cuda()\n if self.parallel == True and self.cuda == True:\n self.net = nn.DataParallel(self.net, device_ids= range( torch.cuda.device_count() ))\n\n def _create_loss(self, loss):\n\n # create loss\n if loss == 'cross':\n self.criterion = nn.CrossEntropyLoss().cuda()\n elif loss == 'mse':\n self.criterion = nn.MSELoss(size_average=True).cuda()\n elif loss == 'l1':\n self.criterion = nn.L1Loss(size_average=True).cuda()\n else:\n assert(False)\n\n self.s_loss = loss\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.functional.softmax", "torch.nn.CrossEntropyLoss", "torch.no_grad", "torch.nn.L1Loss", "torch.cuda.device_count", "numpy.zeros", "torch.nn.MSELoss", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiajunBernoulli/douban-short-commentary
[ "8ab411172bc33739e49b446818681165e9797e01" ]
[ "utils/draw_pic.py" ]
[ "import imageio\nfrom wordcloud import WordCloud\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom config import TOP_NUM, FONT_PATH, PIC_PATH, FILE_PATH\n\"\"\"\n Created by Jiajun·Bernoulli on 2019/1/18\n\"\"\"\n###########################绘制柱状图#####################\ndef draw_bar(labels, quants):\n # -*- coding: utf-8 -*-\n print(labels)\n # 指定默认字体\n matplotlib.rcParams['font.sans-serif'] = ['SimHei']\n matplotlib.rcParams['font.family'] = 'sans-serif'\n # 解决负号'-'显示为方块的问题\n matplotlib.rcParams['axes.unicode_minus'] = False\n plt.bar(range(len(quants)), quants, color='rgb', tick_label=labels)\n plt.show()\n\n##########################绘制词云########################\ndef draw_wordCloud(data):\n my_wordcloud = WordCloud(\n background_color='white', # 设置背景颜色\n max_words=TOP_NUM, # 设置最大实现的字数\n font_path=FONT_PATH, # 设置字体格式,如不设置显示不了中文\n mask=imageio.imread(PIC_PATH), # 设置图片样式\n width=800,\n height=800,\n ).generate_from_frequencies(data)\n plt.figure()\n plt.imshow(my_wordcloud)\n plt.axis('off')\n plt.show() # 展示词云\n my_wordcloud.to_file(FILE_PATH)" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wi905252/ukb-cardiac-mri
[ "3177dde898a65b1d7f385b78e4f134de3852bea5" ]
[ "ukb/ensemble.py" ]
[ "import argparse\nimport pandas as pd\nfrom os import makedirs\nfrom os.path import isdir\n\nfrom ensemble import *\n\ndef main(args):\n pd.set_option(\"display.width\", 100)\n\n if args.pids_csv is not None:\n pids = list(pd.read_csv(args.pids_csv)[args.pids_key])\n else:\n pids = None\n\n if args.output_dir is None:\n output_dir = \"{}/ensemble\".format(args.results_dir)\n else:\n output_dir = args.output_dir\n \n if not isdir(output_dir):\n makedirs(output_dir)\n\n if args.output_name is None:\n output_name = \"ensemble\"\n else:\n output_name = args.output_name\n\n experiment = Ensemble.from_folder(args.results_dir, args.dev_dir, pids=pids)\n _ = experiment.median_vote(metric=args.metric)\n _ = experiment.mv_vote()\n\n if experiment.score:\n print(experiment.score_dataframe)\n experiment.score_dataframe.to_csv(\"{}/{}_score.csv\".format(output_dir, output_name), index=False)\n\n experiment.proba_dataframe.to_csv(\"{}/{}_proba.csv\".format(output_dir, output_name), index=False)\n experiment.pred_dataframe.to_csv(\"{}/{}_pred.csv\".format(output_dir, output_name), index=False)\n print(\"Ensembled results are saved into {}.\".format(output_dir))\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"--results_dir\", type=str, required=True, help=\"the folder where the results are\")\n argparser.add_argument(\"--dev_dir\", type=str, required=True, help=\"the folder where the devset results are\")\n argparser.add_argument(\"--metric\", type=str, default=\"f1_score\", help=\"the metric for tuning threshold\")\n argparser.add_argument(\"--pids_csv\", type=str, default=None, help=\"the csv of pids to filter the results\")\n argparser.add_argument(\"--pids_key\", type=str, default=\"ID\", help=\"the label for pids in the csv: ID/PID/etc.\")\n argparser.add_argument(\"--output_dir\", type=str, default=None, help=\"folder to save the ensembled results\")\n argparser.add_argument(\"--output_name\", type=str, default=None, help=\"name used to save the ensembled results\")\n\n args = argparser.parse_args()\n main(args)\n" ]
[ [ "pandas.set_option", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mariusgruenewald/HARK
[ "6fd5eeefe424775a0849b41d1480aef1190063a7" ]
[ "HARK/ConsumptionSaving/ConsIndShockModel.py" ]
[ "\"\"\"\nClasses to solve canonical consumption-savings models with idiosyncratic shocks\nto income. All models here assume CRRA utility with geometric discounting, no\nbequest motive, and income shocks are fully transitory or fully permanent.\n\nIt currently solves three types of models:\n 1) A very basic \"perfect foresight\" consumption-savings model with no uncertainty.\n 2) A consumption-savings model with risk over transitory and permanent income shocks.\n 3) The model described in (2), with an interest rate for debt that differs\n from the interest rate for savings.\n\nSee NARK https://HARK.githhub.io/Documentation/NARK for information on variable naming conventions.\nSee HARK documentation for mathematical descriptions of the models being solved.\n\"\"\"\n\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom copy import copy, deepcopy\nimport numpy as np\nfrom scipy.optimize import newton\nfrom HARK import AgentType, NullFunc, HARKobject, makeOnePeriodOOSolver\nfrom HARK.utilities import warnings # Because of \"patch\" to warnings modules\nfrom HARK.interpolation import CubicInterp, LowerEnvelope, LinearInterp\nfrom HARK.distribution import Lognormal, MeanOneLogNormal, Uniform\nfrom HARK.distribution import (\n DiscreteDistribution,\n addDiscreteOutcomeConstantMean,\n combineIndepDstns,\n)\nfrom HARK.utilities import (\n makeGridExpMult,\n CRRAutility,\n CRRAutilityP,\n CRRAutilityPP,\n CRRAutilityP_inv,\n CRRAutility_invP,\n CRRAutility_inv,\n CRRAutilityP_invP,\n)\nfrom HARK import _log\nfrom HARK import set_verbosity_level\n\n\n__all__ = [\n \"ConsumerSolution\",\n \"ValueFunc\",\n \"MargValueFunc\",\n \"MargMargValueFunc\",\n \"ConsPerfForesightSolver\",\n \"ConsIndShockSetup\",\n \"ConsIndShockSolverBasic\",\n \"ConsIndShockSolver\",\n \"ConsKinkedRsolver\",\n \"PerfForesightConsumerType\",\n \"IndShockConsumerType\",\n \"KinkedRconsumerType\",\n \"init_perfect_foresight\",\n \"init_idiosyncratic_shocks\",\n \"init_kinked_R\",\n \"init_lifecycle\",\n \"init_cyclical\",\n]\n\nutility = CRRAutility\nutilityP = CRRAutilityP\nutilityPP = CRRAutilityPP\nutilityP_inv = CRRAutilityP_inv\nutility_invP = CRRAutility_invP\nutility_inv = CRRAutility_inv\nutilityP_invP = CRRAutilityP_invP\n\n# =====================================================================\n# === Classes that help solve consumption-saving models ===\n# =====================================================================\n\n\nclass ConsumerSolution(HARKobject):\n \"\"\"\n A class representing the solution of a single period of a consumption-saving\n problem. 
The solution must include a consumption function and marginal\n value function.\n\n Here and elsewhere in the code, Nrm indicates that variables are normalized\n by permanent income.\n \"\"\"\n\n distance_criteria = [\"vPfunc\"]\n\n def __init__(\n self,\n cFunc=None,\n vFunc=None,\n vPfunc=None,\n vPPfunc=None,\n mNrmMin=None,\n hNrm=None,\n MPCmin=None,\n MPCmax=None,\n ):\n \"\"\"\n The constructor for a new ConsumerSolution object.\n\n Parameters\n ----------\n cFunc : function\n The consumption function for this period, defined over market\n resources: c = cFunc(m).\n vFunc : function\n The beginning-of-period value function for this period, defined over\n market resources: v = vFunc(m).\n vPfunc : function\n The beginning-of-period marginal value function for this period,\n defined over market resources: vP = vPfunc(m).\n vPPfunc : function\n The beginning-of-period marginal marginal value function for this\n period, defined over market resources: vPP = vPPfunc(m).\n mNrmMin : float\n The minimum allowable market resources for this period; the consump-\n tion function (etc) are undefined for m < mNrmMin.\n hNrm : float\n Human wealth after receiving income this period: PDV of all future\n income, ignoring mortality.\n MPCmin : float\n Infimum of the marginal propensity to consume this period.\n MPC --> MPCmin as m --> infinity.\n MPCmax : float\n Supremum of the marginal propensity to consume this period.\n MPC --> MPCmax as m --> mNrmMin.\n\n Returns\n -------\n None\n \"\"\"\n # Change any missing function inputs to NullFunc\n self.cFunc = cFunc if cFunc is not None else NullFunc()\n self.vFunc = vFunc if vFunc is not None else NullFunc()\n self.vPfunc = vPfunc if vPfunc is not None else NullFunc()\n # vPFunc = NullFunc() if vPfunc is None else vPfunc\n self.vPPfunc = vPPfunc if vPPfunc is not None else NullFunc()\n self.mNrmMin = mNrmMin\n self.hNrm = hNrm\n self.MPCmin = MPCmin\n self.MPCmax = MPCmax\n\n def appendSolution(self, new_solution):\n \"\"\"\n Appends one solution to another to create a ConsumerSolution whose\n attributes are lists. Used in ConsMarkovModel, where we append solutions\n *conditional* on a particular value of a Markov state to each other in\n order to get the entire solution.\n\n Parameters\n ----------\n new_solution : ConsumerSolution\n The solution to a consumption-saving problem; each attribute is a\n list representing state-conditional values or functions.\n\n Returns\n -------\n None\n \"\"\"\n if type(self.cFunc) != list:\n # Then we assume that self is an empty initialized solution instance.\n # Begin by checking this is so.\n assert (\n NullFunc().distance(self.cFunc) == 0\n ), \"appendSolution called incorrectly!\"\n\n # We will need the attributes of the solution instance to be lists. Do that here.\n self.cFunc = [new_solution.cFunc]\n self.vFunc = [new_solution.vFunc]\n self.vPfunc = [new_solution.vPfunc]\n self.vPPfunc = [new_solution.vPPfunc]\n self.mNrmMin = [new_solution.mNrmMin]\n else:\n self.cFunc.append(new_solution.cFunc)\n self.vFunc.append(new_solution.vFunc)\n self.vPfunc.append(new_solution.vPfunc)\n self.vPPfunc.append(new_solution.vPPfunc)\n self.mNrmMin.append(new_solution.mNrmMin)\n\n\nclass ValueFunc(HARKobject):\n \"\"\"\n A class for representing a value function. 
The underlying interpolation is\n in the space of (m,u_inv(v)); this class \"re-curves\" to the value function.\n \"\"\"\n\n distance_criteria = [\"func\", \"CRRA\"]\n\n def __init__(self, vFuncNvrs, CRRA):\n \"\"\"\n Constructor for a new value function object.\n\n Parameters\n ----------\n vFuncNvrs : function\n A real function representing the value function composed with the\n inverse utility function, defined on market resources: u_inv(vFunc(m))\n CRRA : float\n Coefficient of relative risk aversion.\n\n Returns\n -------\n None\n \"\"\"\n self.func = deepcopy(vFuncNvrs)\n self.CRRA = CRRA\n\n def __call__(self, m):\n \"\"\"\n Evaluate the value function at given levels of market resources m.\n\n Parameters\n ----------\n m : float or np.array\n Market resources (normalized by permanent income) whose value is to\n be found.\n\n Returns\n -------\n v : float or np.array\n Lifetime value of beginning this period with market resources m; has\n same size as input m.\n \"\"\"\n return utility(self.func(m), gam=self.CRRA)\n\n\nclass MargValueFunc(HARKobject):\n \"\"\"\n A class for representing a marginal value function in models where the\n standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).\n \"\"\"\n\n distance_criteria = [\"cFunc\", \"CRRA\"]\n\n def __init__(self, cFunc, CRRA):\n \"\"\"\n Constructor for a new marginal value function object.\n\n Parameters\n ----------\n cFunc : function\n A real function representing the marginal value function composed\n with the inverse marginal utility function, defined on market\n resources: uP_inv(vPfunc(m)). Called cFunc because when standard\n envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).\n CRRA : float\n Coefficient of relative risk aversion.\n\n Returns\n -------\n None\n \"\"\"\n self.cFunc = deepcopy(cFunc)\n self.CRRA = CRRA\n\n def __call__(self, m):\n \"\"\"\n Evaluate the marginal value function at given levels of market resources m.\n\n Parameters\n ----------\n m : float or np.array\n Market resources (normalized by permanent income) whose marginal\n value is to be found.\n\n Returns\n -------\n vP : float or np.array\n Marginal lifetime value of beginning this period with market\n resources m; has same size as input m.\n \"\"\"\n return utilityP(self.cFunc(m), gam=self.CRRA)\n\n def derivative(self, m):\n \"\"\"\n Evaluate the derivative of the marginal value function at given levels\n of market resources m; this is the marginal marginal value function.\n\n Parameters\n ----------\n m : float or np.array\n Market resources (normalized by permanent income) whose marginal\n marginal value is to be found.\n\n Returns\n -------\n vPP : float or np.array\n Marginal marginal lifetime value of beginning this period with market\n resources m; has same size as input m.\n \"\"\"\n c, MPC = self.cFunc.eval_with_derivative(m)\n return MPC * utilityPP(c, gam=self.CRRA)\n\n\nclass MargMargValueFunc(HARKobject):\n \"\"\"\n A class for representing a marginal marginal value function in models where\n the standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).\n \"\"\"\n\n distance_criteria = [\"cFunc\", \"CRRA\"]\n\n def __init__(self, cFunc, CRRA):\n \"\"\"\n Constructor for a new marginal marginal value function object.\n\n Parameters\n ----------\n cFunc : function\n A real function representing the marginal value function composed\n with the inverse marginal utility function, defined on market\n resources: uP_inv(vPfunc(m)). 
Called cFunc because when standard\n envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).\n CRRA : float\n Coefficient of relative risk aversion.\n\n Returns\n -------\n None\n \"\"\"\n self.cFunc = deepcopy(cFunc)\n self.CRRA = CRRA\n\n def __call__(self, m):\n \"\"\"\n Evaluate the marginal marginal value function at given levels of market\n resources m.\n\n Parameters\n ----------\n m : float or np.array\n Market resources (normalized by permanent income) whose marginal\n marginal value is to be found.\n\n Returns\n -------\n vPP : float or np.array\n Marginal marginal lifetime value of beginning this period with market\n resources m; has same size as input m.\n \"\"\"\n c, MPC = self.cFunc.eval_with_derivative(m)\n return MPC * utilityPP(c, gam=self.CRRA)\n\n\n# =====================================================================\n# === Classes and functions that solve consumption-saving models ===\n# =====================================================================\n\n\nclass ConsPerfForesightSolver(HARKobject):\n \"\"\"\n A class for solving a one period perfect foresight consumption-saving problem.\n An instance of this class is created by the function solvePerfForesight in each period.\n \"\"\"\n\n def __init__(\n self,\n solution_next,\n DiscFac,\n LivPrb,\n CRRA,\n Rfree,\n PermGroFac,\n BoroCnstArt,\n MaxKinks,\n ):\n \"\"\"\n Constructor for a new ConsPerfForesightSolver.\n\n Parameters\n ----------\n solution_next : ConsumerSolution\n The solution to next period's one-period problem.\n DiscFac : float\n Intertemporal discount factor for future utility.\n LivPrb : float\n Survival probability; likelihood of being alive at the beginning of\n the next period.\n CRRA : float\n Coefficient of relative risk aversion.\n Rfree : float\n Risk free interest factor on end-of-period assets.\n PermGroFac : float\n Expected permanent income growth factor at the end of this period.\n BoroCnstArt : float or None\n Artificial borrowing constraint, as a multiple of permanent income.\n Can be None, indicating no artificial constraint.\n MaxKinks : int\n Maximum number of kink points to allow in the consumption function;\n additional points will be thrown out. Only relevant in infinite\n horizon model with artificial borrowing constraint.\n\n Returns:\n ----------\n None\n \"\"\"\n # We ask that HARK users define single-letter variables they use in a dictionary\n # attribute called notation. 
Do that first.\n\n self.notation = {\n \"a\": \"assets after all actions\",\n \"m\": \"market resources at decision time\",\n \"c\": \"consumption\",\n }\n self.assignParameters(\n solution_next=solution_next,\n DiscFac=DiscFac,\n LivPrb=LivPrb,\n CRRA=CRRA,\n Rfree=Rfree,\n PermGroFac=PermGroFac,\n BoroCnstArt=BoroCnstArt,\n MaxKinks=MaxKinks,\n )\n\n def defUtilityFuncs(self):\n \"\"\"\n Defines CRRA utility function for this period (and its derivatives),\n saving them as attributes of self for other methods to use.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n self.u = lambda c: utility(c, gam=self.CRRA) # utility function\n self.uP = lambda c: utilityP(c, gam=self.CRRA) # marginal utility function\n self.uPP = lambda c: utilityPP(\n c, gam=self.CRRA\n ) # marginal marginal utility function\n\n def defValueFuncs(self):\n \"\"\"\n Defines the value and marginal value functions for this period.\n Uses the fact that for a perfect foresight CRRA utility problem,\n if the MPC in period t is :math:`\\kappa_{t}`, and relative risk\n aversion :math:`\\rho`, then the inverse value vFuncNvrs has a\n constant slope of :math:`\\kappa_{t}^{-\\rho/(1-\\rho)}` and\n vFuncNvrs has value of zero at the lower bound of market resources\n mNrmMin. See PerfForesightConsumerType.ipynb documentation notebook\n for a brief explanation and the links below for a fuller treatment.\n\n https://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/#vFuncAnalytical\n https://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#vFuncPF\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n\n # See the PerfForesightConsumerType.ipynb documentation notebook for the derivations\n vFuncNvrsSlope = self.MPCmin ** (-self.CRRA / (1.0 - self.CRRA))\n vFuncNvrs = LinearInterp(\n np.array([self.mNrmMinNow, self.mNrmMinNow + 1.0]),\n np.array([0.0, vFuncNvrsSlope]),\n )\n self.vFunc = ValueFunc(vFuncNvrs, self.CRRA)\n self.vPfunc = MargValueFunc(self.cFunc, self.CRRA)\n\n def makePFcFunc(self):\n \"\"\"\n Makes the (linear) consumption function for this period.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n # Use a local value of BoroCnstArt to prevent comparing None and float below.\n if self.BoroCnstArt is None:\n BoroCnstArt = -np.inf\n else:\n BoroCnstArt = self.BoroCnstArt\n\n # Calculate human wealth this period\n self.hNrmNow = (self.PermGroFac / self.Rfree) * (self.solution_next.hNrm + 1.0)\n\n # Calculate the lower bound of the marginal propensity to consume\n PatFac = ((self.Rfree * self.DiscFacEff) ** (1.0 / self.CRRA)) / self.Rfree\n self.MPCmin = 1.0 / (1.0 + PatFac / self.solution_next.MPCmin)\n\n # Extract the discrete kink points in next period's consumption function;\n # don't take the last one, as it only defines the extrapolation and is not a kink.\n mNrmNext = self.solution_next.cFunc.x_list[:-1]\n cNrmNext = self.solution_next.cFunc.y_list[:-1]\n\n # Calculate the end-of-period asset values that would reach those kink points\n # next period, then invert the first order condition to get consumption. 
Then\n # find the endogenous gridpoint (kink point) today that corresponds to each kink\n aNrmNow = (self.PermGroFac / self.Rfree) * (mNrmNext - 1.0)\n cNrmNow = (self.DiscFacEff * self.Rfree) ** (-1.0 / self.CRRA) * (\n self.PermGroFac * cNrmNext\n )\n mNrmNow = aNrmNow + cNrmNow\n\n # Add an additional point to the list of gridpoints for the extrapolation,\n # using the new value of the lower bound of the MPC.\n mNrmNow = np.append(mNrmNow, mNrmNow[-1] + 1.0)\n cNrmNow = np.append(cNrmNow, cNrmNow[-1] + self.MPCmin)\n\n # If the artificial borrowing constraint binds, combine the constrained and\n # unconstrained consumption functions.\n if BoroCnstArt > mNrmNow[0]:\n # Find the highest index where constraint binds\n cNrmCnst = mNrmNow - BoroCnstArt\n CnstBinds = cNrmCnst < cNrmNow\n idx = np.where(CnstBinds)[0][-1]\n\n if idx < (mNrmNow.size - 1):\n # If it is not the *very last* index, find the the critical level\n # of mNrm where the artificial borrowing contraint begins to bind.\n d0 = cNrmNow[idx] - cNrmCnst[idx]\n d1 = cNrmCnst[idx + 1] - cNrmNow[idx + 1]\n m0 = mNrmNow[idx]\n m1 = mNrmNow[idx + 1]\n alpha = d0 / (d0 + d1)\n mCrit = m0 + alpha * (m1 - m0)\n\n # Adjust the grids of mNrm and cNrm to account for the borrowing constraint.\n cCrit = mCrit - BoroCnstArt\n mNrmNow = np.concatenate(([BoroCnstArt, mCrit], mNrmNow[(idx + 1) :]))\n cNrmNow = np.concatenate(([0.0, cCrit], cNrmNow[(idx + 1) :]))\n\n else:\n # If it *is* the very last index, then there are only three points\n # that characterize the consumption function: the artificial borrowing\n # constraint, the constraint kink, and the extrapolation point.\n mXtra = (cNrmNow[-1] - cNrmCnst[-1]) / (1.0 - self.MPCmin)\n mCrit = mNrmNow[-1] + mXtra\n cCrit = mCrit - BoroCnstArt\n mNrmNow = np.array([BoroCnstArt, mCrit, mCrit + 1.0])\n cNrmNow = np.array([0.0, cCrit, cCrit + self.MPCmin])\n\n # If the mNrm and cNrm grids have become too large, throw out the last\n # kink point, being sure to adjust the extrapolation.\n if mNrmNow.size > self.MaxKinks:\n mNrmNow = np.concatenate((mNrmNow[:-2], [mNrmNow[-3] + 1.0]))\n cNrmNow = np.concatenate((cNrmNow[:-2], [cNrmNow[-3] + self.MPCmin]))\n\n # Construct the consumption function as a linear interpolation.\n self.cFunc = LinearInterp(mNrmNow, cNrmNow)\n\n # Calculate the upper bound of the MPC as the slope of the bottom segment.\n self.MPCmax = (cNrmNow[1] - cNrmNow[0]) / (mNrmNow[1] - mNrmNow[0])\n\n # Add two attributes to enable calculation of steady state market resources.\n self.ExIncNext = 1.0 # Perfect foresight income of 1\n self.mNrmMinNow = mNrmNow[0] # Relabeling for compatibility with addSSmNrm\n\n def addSSmNrm(self, solution):\n \"\"\"\n Finds steady state (normalized) market resources and adds it to the\n solution. This is the level of market resources such that the expectation\n of market resources in the next period is unchanged. 
This value doesn't\n necessarily exist.\n\n Parameters\n ----------\n solution : ConsumerSolution\n Solution to this period's problem, which must have attribute cFunc.\n Returns\n -------\n solution : ConsumerSolution\n Same solution that was passed, but now with the attribute mNrmSS.\n \"\"\"\n # Make a linear function of all combinations of c and m that yield mNext = mNow\n mZeroChangeFunc = (\n lambda m: (1.0 - self.PermGroFac / self.Rfree) * m\n + (self.PermGroFac / self.Rfree) * self.ExIncNext\n )\n\n # Find the steady state level of market resources\n searchSSfunc = lambda m: solution.cFunc(m) - mZeroChangeFunc(\n m\n ) # A zero of this is SS market resources\n m_init_guess = (\n self.mNrmMinNow + self.ExIncNext\n ) # Minimum market resources plus next income is okay starting guess\n try:\n mNrmSS = newton(searchSSfunc, m_init_guess)\n except:\n mNrmSS = None\n\n # Add mNrmSS to the solution and return it\n solution.mNrmSS = mNrmSS\n return solution\n\n def solve(self):\n \"\"\"\n Solves the one period perfect foresight consumption-saving problem.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n solution : ConsumerSolution\n The solution to this period's problem.\n \"\"\"\n self.defUtilityFuncs()\n self.DiscFacEff = self.DiscFac * self.LivPrb\n self.makePFcFunc()\n self.defValueFuncs()\n solution = ConsumerSolution(\n cFunc=self.cFunc,\n vFunc=self.vFunc,\n vPfunc=self.vPfunc,\n mNrmMin=self.mNrmMinNow,\n hNrm=self.hNrmNow,\n MPCmin=self.MPCmin,\n MPCmax=self.MPCmax,\n )\n solution = self.addSSmNrm(solution)\n return solution\n\n\n###############################################################################\n###############################################################################\nclass ConsIndShockSetup(ConsPerfForesightSolver):\n \"\"\"\n A superclass for solvers of one period consumption-saving problems with\n constant relative risk aversion utility and permanent and transitory shocks\n to income. Has methods to set up but not solve the one period problem.\n \"\"\"\n\n def __init__(\n self,\n solution_next,\n IncomeDstn,\n LivPrb,\n DiscFac,\n CRRA,\n Rfree,\n PermGroFac,\n BoroCnstArt,\n aXtraGrid,\n vFuncBool,\n CubicBool,\n ):\n \"\"\"\n Constructor for a new solver-setup for problems with income subject to\n permanent and transitory shocks.\n\n Parameters\n ----------\n solution_next : ConsumerSolution\n The solution to next period's one period problem.\n IncomeDstn : [np.array]\n A list containing three arrays of floats, representing a discrete\n approximation to the income process between the period being solved\n and the one immediately following (in solution_next). Order: event\n probabilities, permanent shocks, transitory shocks.\n LivPrb : float\n Survival probability; likelihood of being alive at the beginning of\n the succeeding period.\n DiscFac : float\n Intertemporal discount factor for future utility.\n CRRA : float\n Coefficient of relative risk aversion.\n Rfree : float\n Risk free interest factor on end-of-period assets.\n PermGroFac : float\n Expected permanent income growth factor at the end of this period.\n BoroCnstArt: float or None\n Borrowing constraint for the minimum allowable assets to end the\n period with. 
If it is less than the natural borrowing constraint,\n then it is irrelevant; BoroCnstArt=None indicates no artificial bor-\n rowing constraint.\n aXtraGrid: np.array\n Array of \"extra\" end-of-period asset values-- assets above the\n absolute minimum acceptable level.\n vFuncBool: boolean\n An indicator for whether the value function should be computed and\n included in the reported solution.\n CubicBool: boolean\n An indicator for whether the solver should use cubic or linear inter-\n polation.\n\n Returns\n -------\n None\n \"\"\"\n self.assignParameters(\n solution_next=solution_next,\n IncomeDstn=IncomeDstn,\n LivPrb=LivPrb,\n DiscFac=DiscFac,\n CRRA=CRRA,\n Rfree=Rfree,\n PermGroFac=PermGroFac,\n BoroCnstArt=BoroCnstArt,\n aXtraGrid=aXtraGrid,\n vFuncBool=vFuncBool,\n CubicBool=CubicBool,\n )\n self.defUtilityFuncs()\n\n def defUtilityFuncs(self):\n \"\"\"\n Defines CRRA utility function for this period (and its derivatives,\n and their inverses), saving them as attributes of self for other methods\n to use.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n none\n \"\"\"\n ConsPerfForesightSolver.defUtilityFuncs(self)\n self.uPinv = lambda u: utilityP_inv(u, gam=self.CRRA)\n self.uPinvP = lambda u: utilityP_invP(u, gam=self.CRRA)\n self.uinvP = lambda u: utility_invP(u, gam=self.CRRA)\n if self.vFuncBool:\n self.uinv = lambda u: utility_inv(u, gam=self.CRRA)\n\n def setAndUpdateValues(self, solution_next, IncomeDstn, LivPrb, DiscFac):\n \"\"\"\n Unpacks some of the inputs (and calculates simple objects based on them),\n storing the results in self for use by other methods. These include:\n income shocks and probabilities, next period's marginal value function\n (etc), the probability of getting the worst income shock next period,\n the patience factor, human wealth, and the bounding MPCs.\n\n Parameters\n ----------\n solution_next : ConsumerSolution\n The solution to next period's one period problem.\n IncomeDstn : distribution.DiscreteDistribution\n A DiscreteDistribution with a pmf\n and two point value arrays in X, order:\n permanent shocks, transitory shocks.\n LivPrb : float\n Survival probability; likelihood of being alive at the beginning of\n the succeeding period.\n DiscFac : float\n Intertemporal discount factor for future utility.\n\n Returns\n -------\n None\n \"\"\"\n self.DiscFacEff = DiscFac * LivPrb # \"effective\" discount factor\n self.ShkPrbsNext = IncomeDstn.pmf\n self.PermShkValsNext = IncomeDstn.X[0]\n self.TranShkValsNext = IncomeDstn.X[1]\n self.PermShkMinNext = np.min(self.PermShkValsNext)\n self.TranShkMinNext = np.min(self.TranShkValsNext)\n self.vPfuncNext = solution_next.vPfunc\n self.WorstIncPrb = np.sum(\n self.ShkPrbsNext[\n (self.PermShkValsNext * self.TranShkValsNext)\n == (self.PermShkMinNext * self.TranShkMinNext)\n ]\n )\n\n if self.CubicBool:\n self.vPPfuncNext = solution_next.vPPfunc\n\n if self.vFuncBool:\n self.vFuncNext = solution_next.vFunc\n\n # Update the bounding MPCs and PDV of human wealth:\n self.PatFac = ((self.Rfree * self.DiscFacEff) ** (1.0 / self.CRRA)) / self.Rfree\n self.MPCminNow = 1.0 / (1.0 + self.PatFac / solution_next.MPCmin)\n self.ExIncNext = np.dot(\n self.ShkPrbsNext, self.TranShkValsNext * self.PermShkValsNext\n )\n self.hNrmNow = (\n self.PermGroFac / self.Rfree * (self.ExIncNext + solution_next.hNrm)\n )\n self.MPCmaxNow = 1.0 / (\n 1.0\n + (self.WorstIncPrb ** (1.0 / self.CRRA))\n * self.PatFac\n / solution_next.MPCmax\n )\n\n self.cFuncLimitIntercept = self.MPCminNow * self.hNrmNow\n self.cFuncLimitSlope 
= self.MPCminNow\n\n def defBoroCnst(self, BoroCnstArt):\n \"\"\"\n Defines the constrained portion of the consumption function as cFuncNowCnst,\n an attribute of self. Uses the artificial and natural borrowing constraints.\n\n Parameters\n ----------\n BoroCnstArt : float or None\n Borrowing constraint for the minimum allowable assets to end the\n period with. If it is less than the natural borrowing constraint,\n then it is irrelevant; BoroCnstArt=None indicates no artificial bor-\n rowing constraint.\n\n Returns\n -------\n none\n \"\"\"\n # Calculate the minimum allowable value of money resources in this period\n self.BoroCnstNat = (\n (self.solution_next.mNrmMin - self.TranShkMinNext)\n * (self.PermGroFac * self.PermShkMinNext)\n / self.Rfree\n )\n\n # Note: need to be sure to handle BoroCnstArt==None appropriately.\n # In Py2, this would evaluate to 5.0: np.max([None, 5.0]).\n # However in Py3, this raises a TypeError. Thus here we need to directly\n # address the situation in which BoroCnstArt == None:\n if BoroCnstArt is None:\n self.mNrmMinNow = self.BoroCnstNat\n else:\n self.mNrmMinNow = np.max([self.BoroCnstNat, BoroCnstArt])\n if self.BoroCnstNat < self.mNrmMinNow:\n self.MPCmaxEff = 1.0 # If actually constrained, MPC near limit is 1\n else:\n self.MPCmaxEff = self.MPCmaxNow\n\n # Define the borrowing constraint (limiting consumption function)\n self.cFuncNowCnst = LinearInterp(\n np.array([self.mNrmMinNow, self.mNrmMinNow + 1]), np.array([0.0, 1.0])\n )\n\n def prepareToSolve(self):\n \"\"\"\n Perform preparatory work before calculating the unconstrained consumption\n function.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n none\n \"\"\"\n self.setAndUpdateValues(\n self.solution_next, self.IncomeDstn, self.LivPrb, self.DiscFac\n )\n self.defBoroCnst(self.BoroCnstArt)\n\n\n####################################################################################################\n####################################################################################################\n\n\nclass ConsIndShockSolverBasic(ConsIndShockSetup):\n \"\"\"\n This class solves a single period of a standard consumption-saving problem,\n using linear interpolation and without the ability to calculate the value\n function. ConsIndShockSolver inherits from this class and adds the ability\n to perform cubic interpolation and to calculate the value function.\n\n Note that this class does not have its own initializing method. 
It initial-\n izes the same problem in the same way as ConsIndShockSetup, from which it\n inherits.\n \"\"\"\n\n def prepareToCalcEndOfPrdvP(self):\n \"\"\"\n Prepare to calculate end-of-period marginal value by creating an array\n of market resources that the agent could have next period, considering\n the grid of end-of-period assets and the distribution of shocks he might\n experience next period.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n aNrmNow : np.array\n A 1D array of end-of-period assets; also stored as attribute of self.\n \"\"\"\n\n # We define aNrmNow all the way from BoroCnstNat up to max(self.aXtraGrid)\n # even if BoroCnstNat < BoroCnstArt, so we can construct the consumption\n # function as the lower envelope of the (by the artificial borrowing con-\n # straint) uconstrained consumption function, and the artificially con-\n # strained consumption function.\n aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat\n ShkCount = self.TranShkValsNext.size\n aNrm_temp = np.tile(aNrmNow, (ShkCount, 1))\n\n # Tile arrays of the income shocks and put them into useful shapes\n aNrmCount = aNrmNow.shape[0]\n PermShkVals_temp = (np.tile(self.PermShkValsNext, (aNrmCount, 1))).transpose()\n TranShkVals_temp = (np.tile(self.TranShkValsNext, (aNrmCount, 1))).transpose()\n ShkPrbs_temp = (np.tile(self.ShkPrbsNext, (aNrmCount, 1))).transpose()\n\n # Get cash on hand next period\n mNrmNext = (\n self.Rfree / (self.PermGroFac * PermShkVals_temp) * aNrm_temp\n + TranShkVals_temp\n ) # CDC 20191205: This should be divided by LivPrb[0] for Blanchard insurance\n\n # Store and report the results\n self.PermShkVals_temp = PermShkVals_temp\n self.ShkPrbs_temp = ShkPrbs_temp\n self.mNrmNext = mNrmNext\n self.aNrmNow = aNrmNow\n return aNrmNow\n\n\n def calcEndOfPrdvP(self):\n \"\"\"\n Calculate end-of-period marginal value of assets at each point in aNrmNow.\n Does so by taking a weighted sum of next period marginal values across\n income shocks (in a preconstructed grid self.mNrmNext).\n\n Parameters\n ----------\n none\n\n Returns\n -------\n EndOfPrdvP : np.array\n A 1D array of end-of-period marginal value of assets\n \"\"\"\n\n EndOfPrdvP = (\n self.DiscFacEff\n * self.Rfree\n * self.PermGroFac ** (-self.CRRA)\n * np.sum(\n self.PermShkVals_temp ** (-self.CRRA)\n * self.vPfuncNext(self.mNrmNext)\n * self.ShkPrbs_temp,\n axis=0,\n )\n )\n return EndOfPrdvP\n\n def getPointsForInterpolation(self, EndOfPrdvP, aNrmNow):\n \"\"\"\n Finds interpolation points (c,m) for the consumption function.\n\n Parameters\n ----------\n EndOfPrdvP : np.array\n Array of end-of-period marginal values.\n aNrmNow : np.array\n Array of end-of-period asset values that yield the marginal values\n in EndOfPrdvP.\n\n Returns\n -------\n c_for_interpolation : np.array\n Consumption points for interpolation.\n m_for_interpolation : np.array\n Corresponding market resource points for interpolation.\n \"\"\"\n cNrmNow = self.uPinv(EndOfPrdvP)\n mNrmNow = cNrmNow + aNrmNow\n\n # Limiting consumption is zero as m approaches mNrmMin\n c_for_interpolation = np.insert(cNrmNow, 0, 0.0, axis=-1)\n m_for_interpolation = np.insert(mNrmNow, 0, self.BoroCnstNat, axis=-1)\n\n # Store these for calcvFunc\n self.cNrmNow = cNrmNow\n self.mNrmNow = mNrmNow\n\n return c_for_interpolation, m_for_interpolation\n\n def usePointsForInterpolation(self, cNrm, mNrm, interpolator):\n \"\"\"\n Constructs a basic solution for this period, including the consumption\n function and marginal value function.\n\n Parameters\n ----------\n 
cNrm : np.array\n (Normalized) consumption points for interpolation.\n mNrm : np.array\n (Normalized) corresponding market resource points for interpolation.\n interpolator : function\n A function that constructs and returns a consumption function.\n\n Returns\n -------\n solution_now : ConsumerSolution\n The solution to this period's consumption-saving problem, with a\n consumption function, marginal value function, and minimum m.\n \"\"\"\n # Construct the unconstrained consumption function\n cFuncNowUnc = interpolator(mNrm, cNrm)\n\n # Combine the constrained and unconstrained functions into the true consumption function\n cFuncNow = LowerEnvelope(cFuncNowUnc, self.cFuncNowCnst, nan_bool=False)\n\n # Make the marginal value function and the marginal marginal value function\n vPfuncNow = MargValueFunc(cFuncNow, self.CRRA)\n\n # Pack up the solution and return it\n solution_now = ConsumerSolution(\n cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow\n )\n return solution_now\n\n def makeBasicSolution(self, EndOfPrdvP, aNrm, interpolator):\n \"\"\"\n Given end of period assets and end of period marginal value, construct\n the basic solution for this period.\n\n Parameters\n ----------\n EndOfPrdvP : np.array\n Array of end-of-period marginal values.\n aNrm : np.array\n Array of end-of-period asset values that yield the marginal values\n in EndOfPrdvP.\n\n interpolator : function\n A function that constructs and returns a consumption function.\n\n Returns\n -------\n solution_now : ConsumerSolution\n The solution to this period's consumption-saving problem, with a\n consumption function, marginal value function, and minimum m.\n \"\"\"\n cNrm, mNrm = self.getPointsForInterpolation(EndOfPrdvP, aNrm)\n solution_now = self.usePointsForInterpolation(cNrm, mNrm, interpolator)\n return solution_now\n\n def addMPCandHumanWealth(self, solution):\n \"\"\"\n Take a solution and add human wealth and the bounding MPCs to it.\n\n Parameters\n ----------\n solution : ConsumerSolution\n The solution to this period's consumption-saving problem.\n\n Returns:\n ----------\n solution : ConsumerSolution\n The solution to this period's consumption-saving problem, but now\n with human wealth and the bounding MPCs.\n \"\"\"\n solution.hNrm = self.hNrmNow\n solution.MPCmin = self.MPCminNow\n solution.MPCmax = self.MPCmaxEff\n return solution\n\n def makeLinearcFunc(self, mNrm, cNrm):\n \"\"\"\n Makes a linear interpolation to represent the (unconstrained) consumption function.\n\n Parameters\n ----------\n mNrm : np.array\n Corresponding market resource points for interpolation.\n cNrm : np.array\n Consumption points for interpolation.\n\n Returns\n -------\n cFuncUnc : LinearInterp\n The unconstrained consumption function for this period.\n \"\"\"\n cFuncUnc = LinearInterp(\n mNrm, cNrm, self.cFuncLimitIntercept, self.cFuncLimitSlope\n )\n return cFuncUnc\n\n def solve(self):\n \"\"\"\n Solves a one period consumption saving problem with risky income.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n solution : ConsumerSolution\n The solution to the one period problem.\n \"\"\"\n aNrm = self.prepareToCalcEndOfPrdvP()\n EndOfPrdvP = self.calcEndOfPrdvP()\n solution = self.makeBasicSolution(EndOfPrdvP, aNrm, self.makeLinearcFunc)\n solution = self.addMPCandHumanWealth(solution)\n return solution\n\n\n###############################################################################\n###############################################################################\n\n\nclass 
ConsIndShockSolver(ConsIndShockSolverBasic):\n \"\"\"\n This class solves a single period of a standard consumption-saving problem.\n It inherits from ConsIndShockSolverBasic, adding the ability to perform cubic\n interpolation and to calculate the value function.\n \"\"\"\n\n def makeCubiccFunc(self, mNrm, cNrm):\n \"\"\"\n Makes a cubic spline interpolation of the unconstrained consumption\n function for this period.\n\n Parameters\n ----------\n mNrm : np.array\n Corresponding market resource points for interpolation.\n cNrm : np.array\n Consumption points for interpolation.\n\n Returns\n -------\n cFuncUnc : CubicInterp\n The unconstrained consumption function for this period.\n \"\"\"\n EndOfPrdvPP = (\n self.DiscFacEff\n * self.Rfree\n * self.Rfree\n * self.PermGroFac ** (-self.CRRA - 1.0)\n * np.sum(\n self.PermShkVals_temp ** (-self.CRRA - 1.0)\n * self.vPPfuncNext(self.mNrmNext)\n * self.ShkPrbs_temp,\n axis=0,\n )\n )\n dcda = EndOfPrdvPP / self.uPP(np.array(cNrm[1:]))\n MPC = dcda / (dcda + 1.0)\n MPC = np.insert(MPC, 0, self.MPCmaxNow)\n\n cFuncNowUnc = CubicInterp(\n mNrm, cNrm, MPC, self.MPCminNow * self.hNrmNow, self.MPCminNow\n )\n return cFuncNowUnc\n\n def makeEndOfPrdvFunc(self, EndOfPrdvP):\n \"\"\"\n Construct the end-of-period value function for this period, storing it\n as an attribute of self for use by other methods.\n\n Parameters\n ----------\n EndOfPrdvP : np.array\n Array of end-of-period marginal value of assets corresponding to the\n asset values in self.aNrmNow.\n\n Returns\n -------\n none\n \"\"\"\n VLvlNext = (\n self.PermShkVals_temp ** (1.0 - self.CRRA)\n * self.PermGroFac ** (1.0 - self.CRRA)\n ) * self.vFuncNext(self.mNrmNext)\n EndOfPrdv = self.DiscFacEff * np.sum(VLvlNext * self.ShkPrbs_temp, axis=0)\n EndOfPrdvNvrs = self.uinv(\n EndOfPrdv\n ) # value transformed through inverse utility\n EndOfPrdvNvrsP = EndOfPrdvP * self.uinvP(EndOfPrdv)\n EndOfPrdvNvrs = np.insert(EndOfPrdvNvrs, 0, 0.0)\n EndOfPrdvNvrsP = np.insert(\n EndOfPrdvNvrsP, 0, EndOfPrdvNvrsP[0]\n ) # This is a very good approximation, vNvrsPP = 0 at the asset minimum\n aNrm_temp = np.insert(self.aNrmNow, 0, self.BoroCnstNat)\n EndOfPrdvNvrsFunc = CubicInterp(aNrm_temp, EndOfPrdvNvrs, EndOfPrdvNvrsP)\n self.EndOfPrdvFunc = ValueFunc(EndOfPrdvNvrsFunc, self.CRRA)\n\n def addvFunc(self, solution, EndOfPrdvP):\n \"\"\"\n Creates the value function for this period and adds it to the solution.\n\n Parameters\n ----------\n solution : ConsumerSolution\n The solution to this single period problem, likely including the\n consumption function, marginal value function, etc.\n EndOfPrdvP : np.array\n Array of end-of-period marginal value of assets corresponding to the\n asset values in self.aNrmNow.\n\n Returns\n -------\n solution : ConsumerSolution\n The single period solution passed as an input, but now with the\n value function (defined over market resources m) as an attribute.\n \"\"\"\n self.makeEndOfPrdvFunc(EndOfPrdvP)\n solution.vFunc = self.makevFunc(solution)\n return solution\n\n def makevFunc(self, solution):\n \"\"\"\n Creates the value function for this period, defined over market resources m.\n self must have the attribute EndOfPrdvFunc in order to execute.\n\n Parameters\n ----------\n solution : ConsumerSolution\n The solution to this single period problem, which must include the\n consumption function.\n\n Returns\n -------\n vFuncNow : ValueFunc\n A representation of the value function for this period, defined over\n normalized market resources m: v = vFuncNow(m).\n \"\"\"\n # 
Compute expected value and marginal value on a grid of market resources\n mNrm_temp = self.mNrmMinNow + self.aXtraGrid\n cNrmNow = solution.cFunc(mNrm_temp)\n aNrmNow = mNrm_temp - cNrmNow\n vNrmNow = self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow)\n vPnow = self.uP(cNrmNow)\n\n # Construct the beginning-of-period value function\n vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility\n vNvrsP = vPnow * self.uinvP(vNrmNow)\n mNrm_temp = np.insert(mNrm_temp, 0, self.mNrmMinNow)\n vNvrs = np.insert(vNvrs, 0, 0.0)\n vNvrsP = np.insert(\n vNvrsP, 0, self.MPCmaxEff ** (-self.CRRA / (1.0 - self.CRRA))\n )\n MPCminNvrs = self.MPCminNow ** (-self.CRRA / (1.0 - self.CRRA))\n vNvrsFuncNow = CubicInterp(\n mNrm_temp, vNvrs, vNvrsP, MPCminNvrs * self.hNrmNow, MPCminNvrs\n )\n vFuncNow = ValueFunc(vNvrsFuncNow, self.CRRA)\n return vFuncNow\n\n def addvPPfunc(self, solution):\n \"\"\"\n Adds the marginal marginal value function to an existing solution, so\n that the next solver can evaluate vPP and thus use cubic interpolation.\n\n Parameters\n ----------\n solution : ConsumerSolution\n The solution to this single period problem, which must include the\n consumption function.\n\n Returns\n -------\n solution : ConsumerSolution\n The same solution passed as input, but with the marginal marginal\n value function for this period added as the attribute vPPfunc.\n \"\"\"\n vPPfuncNow = MargMargValueFunc(solution.cFunc, self.CRRA)\n solution.vPPfunc = vPPfuncNow\n return solution\n\n\n def solve(self):\n \"\"\"\n Solves the single period consumption-saving problem using the method of\n endogenous gridpoints. Solution includes a consumption function cFunc\n (using cubic or linear splines), a marginal value function vPfunc, a min-\n imum acceptable level of normalized market resources mNrmMin, normalized\n human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also\n have a value function vFunc and marginal marginal value function vPPfunc.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n solution : ConsumerSolution\n The solution to the single period consumption-saving problem.\n \"\"\"\n # Make arrays of end-of-period assets and end-of-period marginal value\n aNrm = self.prepareToCalcEndOfPrdvP()\n EndOfPrdvP = self.calcEndOfPrdvP()\n\n # Construct a basic solution for this period\n if self.CubicBool:\n solution = self.makeBasicSolution(\n EndOfPrdvP, aNrm, interpolator=self.makeCubiccFunc\n )\n else:\n solution = self.makeBasicSolution(\n EndOfPrdvP, aNrm, interpolator=self.makeLinearcFunc\n )\n solution = self.addMPCandHumanWealth(solution) # add a few things\n solution = self.addSSmNrm(solution) # find steady state m\n\n # Add the value function if requested, as well as the marginal marginal\n # value function if cubic splines were used (to prepare for next period)\n if self.vFuncBool:\n solution = self.addvFunc(solution, EndOfPrdvP)\n if self.CubicBool:\n solution = self.addvPPfunc(solution)\n return solution\n\n\n####################################################################################################\n####################################################################################################\n\n\nclass ConsKinkedRsolver(ConsIndShockSolver):\n \"\"\"\n A class to solve a single period consumption-saving problem where the interest\n rate on debt differs from the interest rate on savings. Inherits from\n ConsIndShockSolver, with nearly identical inputs and outputs. The key diff-\n erence is that Rfree is replaced by Rsave (a>0) and Rboro (a<0). 
The solver\n can handle Rboro == Rsave, which makes it identical to ConsIndShocksolver, but\n it terminates immediately if Rboro < Rsave, as this has a different solution.\n \"\"\"\n\n def __init__(\n self,\n solution_next,\n IncomeDstn,\n LivPrb,\n DiscFac,\n CRRA,\n Rboro,\n Rsave,\n PermGroFac,\n BoroCnstArt,\n aXtraGrid,\n vFuncBool,\n CubicBool,\n ):\n \"\"\"\n Constructor for a new solver for problems with risky income and a different\n interest rate on borrowing and saving.\n\n Parameters\n ----------\n solution_next : ConsumerSolution\n The solution to next period's one period problem.\n IncomeDstn : [np.array]\n A list containing three arrays of floats, representing a discrete\n approximation to the income process between the period being solved\n and the one immediately following (in solution_next). Order: event\n probabilities, permanent shocks, transitory shocks.\n LivPrb : float\n Survival probability; likelihood of being alive at the beginning of\n the succeeding period.\n DiscFac : float\n Intertemporal discount factor for future utility.\n CRRA : float\n Coefficient of relative risk aversion.\n Rboro: float\n Interest factor on assets between this period and the succeeding\n period when assets are negative.\n Rsave: float\n Interest factor on assets between this period and the succeeding\n period when assets are positive.\n PermGroFac : float\n Expected permanent income growth factor at the end of this period.\n BoroCnstArt: float or None\n Borrowing constraint for the minimum allowable assets to end the\n period with. If it is less than the natural borrowing constraint,\n then it is irrelevant; BoroCnstArt=None indicates no artificial bor-\n rowing constraint.\n aXtraGrid: np.array\n Array of \"extra\" end-of-period asset values-- assets above the\n absolute minimum acceptable level.\n vFuncBool: boolean\n An indicator for whether the value function should be computed and\n included in the reported solution.\n CubicBool: boolean\n An indicator for whether the solver should use cubic or linear inter-\n polation.\n\n Returns\n -------\n None\n \"\"\"\n assert (\n Rboro >= Rsave\n ), \"Interest factor on debt less than interest factor on savings!\"\n\n # Initialize the solver. 
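(Rboro is passed in the Rfree slot of the parent constructor; the distinction between Rboro and Rsave is applied later, in prepareToCalcEndOfPrdvP.)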
Most of the steps are exactly the same as in\n # the non-kinked-R basic case, so start with that.\n ConsIndShockSolver.__init__(\n self,\n solution_next,\n IncomeDstn,\n LivPrb,\n DiscFac,\n CRRA,\n Rboro,\n PermGroFac,\n BoroCnstArt,\n aXtraGrid,\n vFuncBool,\n CubicBool,\n )\n\n # Assign the interest rates as class attributes, to use them later.\n self.Rboro = Rboro\n self.Rsave = Rsave\n\n def makeCubiccFunc(self, mNrm, cNrm):\n \"\"\"\n Makes a cubic spline interpolation that contains the kink of the unconstrained\n consumption function for this period.\n\n Parameters\n ----------\n mNrm : np.array\n Corresponding market resource points for interpolation.\n cNrm : np.array\n Consumption points for interpolation.\n\n Returns\n -------\n cFuncUnc : CubicInterp\n The unconstrained consumption function for this period.\n \"\"\"\n # Call the makeCubiccFunc from ConsIndShockSolver.\n cFuncNowUncKink = super().makeCubiccFunc(mNrm, cNrm)\n\n # Change the coeffients at the kinked points.\n cFuncNowUncKink.coeffs[self.i_kink + 1] = [\n cNrm[self.i_kink],\n mNrm[self.i_kink + 1] - mNrm[self.i_kink],\n 0,\n 0,\n ]\n\n return cFuncNowUncKink\n\n def prepareToCalcEndOfPrdvP(self):\n \"\"\"\n Prepare to calculate end-of-period marginal value by creating an array\n of market resources that the agent could have next period, considering\n the grid of end-of-period assets and the distribution of shocks he might\n experience next period. This differs from the baseline case because\n different savings choices yield different interest rates.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n aNrmNow : np.array\n A 1D array of end-of-period assets; also stored as attribute of self.\n \"\"\"\n KinkBool = (\n self.Rboro > self.Rsave\n ) # Boolean indicating that there is actually a kink.\n # When Rboro == Rsave, this method acts just like it did in IndShock.\n # When Rboro < Rsave, the solver would have terminated when it was called.\n\n # Make a grid of end-of-period assets, including *two* copies of a=0\n if KinkBool:\n aNrmNow = np.sort(\n np.hstack(\n (np.asarray(self.aXtraGrid) + self.mNrmMinNow, np.array([0.0, 0.0]))\n )\n )\n else:\n aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow\n aXtraCount = aNrmNow.size\n\n # Make tiled versions of the assets grid and income shocks\n ShkCount = self.TranShkValsNext.size\n aNrm_temp = np.tile(aNrmNow, (ShkCount, 1))\n PermShkVals_temp = (np.tile(self.PermShkValsNext, (aXtraCount, 1))).transpose()\n TranShkVals_temp = (np.tile(self.TranShkValsNext, (aXtraCount, 1))).transpose()\n ShkPrbs_temp = (np.tile(self.ShkPrbsNext, (aXtraCount, 1))).transpose()\n\n # Make a 1D array of the interest factor at each asset gridpoint\n Rfree_vec = self.Rsave * np.ones(aXtraCount)\n if KinkBool:\n self.i_kink = (\n np.sum(aNrmNow <= 0) - 1\n ) # Save the index of the kink point as an attribute\n Rfree_vec[0 : self.i_kink] = self.Rboro\n self.Rfree = Rfree_vec\n Rfree_temp = np.tile(Rfree_vec, (ShkCount, 1))\n\n # Make an array of market resources that we could have next period,\n # considering the grid of assets and the income shocks that could occur\n mNrmNext = (\n Rfree_temp / (self.PermGroFac * PermShkVals_temp) * aNrm_temp\n + TranShkVals_temp\n )\n\n # Recalculate the minimum MPC and human wealth using the interest factor on saving.\n # This overwrites values from setAndUpdateValues, which were based on Rboro instead.\n if KinkBool:\n PatFacTop = (\n (self.Rsave * self.DiscFacEff) ** (1.0 / self.CRRA)\n ) / self.Rsave\n self.MPCminNow = 1.0 / (1.0 + PatFacTop / 
self.solution_next.MPCmin)\n self.hNrmNow = (\n self.PermGroFac\n / self.Rsave\n * (\n np.dot(\n self.ShkPrbsNext, self.TranShkValsNext * self.PermShkValsNext\n )\n + self.solution_next.hNrm\n )\n )\n\n # Store some of the constructed arrays for later use and return the assets grid\n self.PermShkVals_temp = PermShkVals_temp\n self.ShkPrbs_temp = ShkPrbs_temp\n self.mNrmNext = mNrmNext\n self.aNrmNow = aNrmNow\n return aNrmNow\n\n\n# ============================================================================\n# == Classes for representing types of consumer agents (and things they do) ==\n# ============================================================================\n\n# Make a dictionary to specify a perfect foresight consumer type\ninit_perfect_foresight = {\n 'CRRA': 2.0, # Coefficient of relative risk aversion,\n 'Rfree': 1.03, # Interest factor on assets\n 'DiscFac': 0.96, # Intertemporal discount factor\n 'LivPrb': [0.98], # Survival probability\n 'PermGroFac': [1.01], # Permanent income growth factor\n 'BoroCnstArt': None, # Artificial borrowing constraint\n 'MaxKinks': 400, # Maximum number of grid points to allow in cFunc (should be large)\n 'AgentCount': 10000, # Number of agents of this type (only matters for simulation)\n 'aNrmInitMean' : 0.0, # Mean of log initial assets (only matters for simulation)\n 'aNrmInitStd' : 1.0, # Standard deviation of log initial assets (only for simulation)\n 'pLvlInitMean' : 0.0, # Mean of log initial permanent income (only matters for simulation)\n 'pLvlInitStd' : 0.0, # Standard deviation of log initial permanent income (only matters for simulation)\n 'PermGroFacAgg' : 1.0,# Aggregate permanent income growth factor: portion of PermGroFac attributable to aggregate productivity growth (only matters for simulation)\n 'T_age' : None, # Age after which simulated agents are automatically killed\n 'T_cycle' : 1 # Number of periods in the cycle for this agent type\n}\n\n\nclass PerfForesightConsumerType(AgentType):\n \"\"\"\n A perfect foresight consumer type who has no uncertainty other than mortality.\n His problem is defined by a coefficient of relative risk aversion, intertemporal\n discount factor, interest factor, an artificial borrowing constraint (maybe)\n and time sequences of the permanent income growth rate and survival probability.\n \"\"\"\n\n # Define some universal values for all consumer types\n cFunc_terminal_ = LinearInterp([0.0, 1.0], [0.0, 1.0]) # c=m in terminal period\n vFunc_terminal_ = LinearInterp([0.0, 1.0], [0.0, 0.0]) # This is overwritten\n solution_terminal_ = ConsumerSolution(\n cFunc=cFunc_terminal_,\n vFunc=vFunc_terminal_,\n mNrmMin=0.0,\n hNrm=0.0,\n MPCmin=1.0,\n MPCmax=1.0,\n )\n time_vary_ = [\"LivPrb\", \"PermGroFac\"]\n time_inv_ = [\"CRRA\", \"Rfree\", \"DiscFac\", \"MaxKinks\", \"BoroCnstArt\"]\n state_vars = ['pLvlNow', 'PlvlAggNow', 'bNrmNow', 'mNrmNow', \"aNrmNow\"]\n shock_vars_ = []\n\n def __init__(self, cycles=1, verbose=1, quiet=False, **kwds):\n \"\"\"\n Instantiate a new consumer type with given data.\n See init_perfect_foresight for a dictionary of\n the keywords that should be passed to the constructor.\n\n Parameters\n ----------\n cycles : int\n Number of times the sequence of periods should be solved.\n\n Returns\n -------\n None\n \"\"\"\n\n params = init_perfect_foresight.copy()\n params.update(kwds)\n kwds = params\n\n # Initialize a basic AgentType\n AgentType.__init__(\n self,\n solution_terminal=deepcopy(self.solution_terminal_),\n cycles=cycles,\n pseudo_terminal=False,\n **kwds\n )\n\n # 
Add consumer-type specific objects, copying to create independent versions\n self.time_vary = deepcopy(self.time_vary_)\n self.time_inv = deepcopy(self.time_inv_)\n\n self.shock_vars = deepcopy(self.shock_vars_)\n self.verbose = verbose\n self.quiet = quiet\n self.solveOnePeriod = makeOnePeriodOOSolver(ConsPerfForesightSolver)\n set_verbosity_level((4 - verbose) * 10)\n\n def preSolve(self):\n self.updateSolutionTerminal() # Solve the terminal period problem\n\n # Fill in BoroCnstArt and MaxKinks if they're not specified or are irrelevant.\n if not hasattr(self, \"BoroCnstArt\"): # If no borrowing constraint specified...\n self.BoroCnstArt = None # ...assume the user wanted none\n if not hasattr(self, \"MaxKinks\"):\n if self.cycles > 0: # If it's not an infinite horizon model...\n self.MaxKinks = np.inf # ...there's no need to set MaxKinks\n elif self.BoroCnstArt is None: # If there's no borrowing constraint...\n self.MaxKinks = np.inf # ...there's no need to set MaxKinks\n else:\n raise (\n AttributeError(\n \"PerfForesightConsumerType requires the attribute MaxKinks to be specified when BoroCnstArt is not None and cycles == 0.\"\n )\n )\n\n def checkRestrictions(self):\n \"\"\"\n A method to check that various restrictions are met for the model class.\n \"\"\"\n if self.DiscFac < 0:\n raise Exception(\"DiscFac is below zero with value: \" + str(self.DiscFac))\n\n return\n\n def updateSolutionTerminal(self):\n \"\"\"\n Update the terminal period solution. This method should be run when a\n new AgentType is created or when CRRA changes.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n none\n \"\"\"\n self.solution_terminal.vFunc = ValueFunc(self.cFunc_terminal_, self.CRRA)\n self.solution_terminal.vPfunc = MargValueFunc(self.cFunc_terminal_, self.CRRA)\n self.solution_terminal.vPPfunc = MargMargValueFunc(\n self.cFunc_terminal_, self.CRRA\n )\n\n def unpackcFunc(self):\n \"\"\" DEPRECATED: Use solution.unpack('cFunc') instead.\n \"Unpacks\" the consumption functions into their own field for easier access.\n After the model has been solved, the consumption functions reside in the\n attribute cFunc of each element of ConsumerType.solution. This method\n creates a (time varying) attribute cFunc that contains a list of consumption\n functions.\n Parameters\n ----------\n none\n Returns\n -------\n none\n \"\"\"\n _log.critical(\n \"unpackcFunc is deprecated and it will soon be removed, \"\n \"please use unpack('cFunc') instead.\"\n )\n self.unpack(\"cFunc\")\n\n def initializeSim(self):\n self.PermShkAggNow = self.PermGroFacAgg # This never changes during simulation\n self.state_now['PlvlAggNow'] = 1.0\n AgentType.initializeSim(self)\n\n def simBirth(self, which_agents):\n \"\"\"\n Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as\n well as time variables t_age and t_cycle. Normalized assets and permanent income levels\n are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).\n\n Parameters\n ----------\n which_agents : np.array(Bool)\n Boolean array of size self.AgentCount indicating which agents should be \"born\".\n\n Returns\n -------\n None\n \"\"\"\n # Get and store states for newly born agents\n N = np.sum(which_agents) # Number of new consumers to make\n self.state_now[\"aNrmNow\"][which_agents] = Lognormal(\n mu=self.aNrmInitMean,\n sigma=self.aNrmInitStd,\n seed=self.RNG.randint(0, 2 ** 31 - 1),\n ).draw(N)\n # why is a now variable set here? 
Because it's an aggregate.\n pLvlInitMeanNow = self.pLvlInitMean + np.log(\n self.state_now[\"PlvlAggNow\"]\n ) # Account for newer cohorts having higher permanent income\n self.state_now[\"pLvlNow\"][which_agents] = Lognormal(\n pLvlInitMeanNow,\n self.pLvlInitStd,\n seed=self.RNG.randint(0, 2 ** 31 - 1)\n ).draw(N)\n self.t_age[which_agents] = 0 # How many periods since each agent was born\n self.t_cycle[\n which_agents\n ] = 0 # Which period of the cycle each agent is currently in\n return None\n\n def simDeath(self):\n \"\"\"\n Determines which agents die this period and must be replaced. Uses the sequence in LivPrb\n to determine survival probabilities for each agent.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n which_agents : np.array(bool)\n Boolean array of size AgentCount indicating which agents die.\n \"\"\"\n # Determine who dies\n DiePrb_by_t_cycle = 1.0 - np.asarray(self.LivPrb)\n DiePrb = DiePrb_by_t_cycle[\n self.t_cycle - 1\n ] # Time has already advanced, so look back one\n DeathShks = Uniform(seed=self.RNG.randint(0, 2 ** 31 - 1)).draw(\n N=self.AgentCount\n )\n which_agents = DeathShks < DiePrb\n if self.T_age is not None: # Kill agents that have lived for too many periods\n too_old = self.t_age >= self.T_age\n which_agents = np.logical_or(which_agents, too_old)\n return which_agents\n\n def getShocks(self):\n \"\"\"\n Finds permanent and transitory income \"shocks\" for each agent this period. As this is a\n perfect foresight model, there are no stochastic shocks: PermShkNow = PermGroFac for each\n agent (according to their t_cycle) and TranShkNow = 1.0 for all agents.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n PermGroFac = np.array(self.PermGroFac)\n self.shocks[\"PermShkNow\"] = PermGroFac[\n self.t_cycle - 1\n ] # cycle time has already been advanced\n self.shocks[\"TranShkNow\"] = np.ones(self.AgentCount)\n\n def getRfree(self):\n \"\"\"\n Returns an array of size self.AgentCount with self.Rfree in every entry.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n RfreeNow : np.array\n Array of size self.AgentCount with risk free interest rate for each agent.\n \"\"\"\n RfreeNow = self.Rfree * np.ones(self.AgentCount)\n return RfreeNow\n\n def transition(self):\n pLvlPrev = self.state_prev['pLvlNow']\n aNrmPrev = self.state_prev['aNrmNow']\n RfreeNow = self.getRfree()\n\n # Calculate new states: normalized market resources and permanent income level\n pLvlNow = pLvlPrev*self.shocks['PermShkNow'] # Updated permanent income level\n PlvlAggNow = self.state_prev['PlvlAggNow']*self.PermShkAggNow # Updated aggregate permanent productivity level\n ReffNow = RfreeNow/self.shocks['PermShkNow'] # \"Effective\" interest factor on normalized assets\n bNrmNow = ReffNow*aNrmPrev # Bank balances before labor income\n mNrmNow = bNrmNow + self.shocks['TranShkNow'] # Market resources after income\n\n return pLvlNow, PlvlAggNow, bNrmNow, mNrmNow, None\n\n\n def getControls(self):\n \"\"\"\n Calculates consumption for each consumer of this type using the consumption functions.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n cNrmNow = np.zeros(self.AgentCount) + np.nan\n MPCnow = np.zeros(self.AgentCount) + np.nan\n for t in range(self.T_cycle):\n these = t == self.t_cycle\n cNrmNow[these], MPCnow[these] = self.solution[t].cFunc.eval_with_derivative(\n self.state_now['mNrmNow'][these]\n )\n self.cNrmNow = cNrmNow\n self.MPCnow = MPCnow\n return None\n\n def getPostStates(self):\n \"\"\"\n Calculates 
end-of-period assets for each consumer of this type.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n # should this be \"Now\", or \"Prev\"?!?\n self.state_now['aNrmNow'] = self.state_now['mNrmNow'] - self.cNrmNow\n # Useful in some cases to precalculate asset level\n self.state_now['aLvlNow'] = self.state_now['aNrmNow'] * self.state_now['pLvlNow']\n\n # moves now to prev\n super().getPostStates()\n\n return None\n\n def checkCondition(self, name, test, messages, verbose, verbose_messages=None):\n \"\"\"\n Checks one condition.\n\n Parameters\n ----------\n name : string\n Name for the condition.\n\n test : function(self -> boolean)\n A function (of self) which tests the condition\n\n messages : dict{boolean : string}\n A dictiomary with boolean keys containing values\n for messages to print if the condition is\n true or false.\n\n verbose_messages : dict{boolean : string}\n (Optional) A dictiomary with boolean keys containing values\n for messages to print if the condition is\n true or false under verbose printing.\n \"\"\"\n self.conditions[name] = test(self)\n set_verbosity_level((4 - verbose) * 10)\n _log.info(messages[self.conditions[name]].format(self))\n if verbose_messages:\n _log.debug(verbose_messages[self.conditions[name]].format(self))\n\n def checkAIC(self, verbose=None):\n \"\"\"\n Evaluate and report on the Absolute Impatience Condition\n \"\"\"\n name = \"AIC\"\n test = lambda agent: agent.thorn < 1\n\n messages = {\n True: \"The value of the absolute impatience factor (APF) for the supplied parameter values satisfies the Absolute Impatience Condition.\",\n False: \"The given type violates the Absolute Impatience Condition with the supplied parameter values; the APF is {0.thorn}\",\n }\n verbose_messages = {\n True: \" Because the APF < 1, the absolute amount of consumption is expected to fall over time.\",\n False: \" Because the APF > 1, the absolute amount of consumption is expected to grow over time.\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkGICPF(self, verbose=None):\n \"\"\"\n Evaluate and report on the Growth Impatience Condition for the Perfect Foresight model\n \"\"\"\n name = \"GICPF\"\n\n self.GPFPF = self.thorn / self.PermGroFac[0]\n\n test = lambda agent: agent.GPFPF < 1\n\n messages = {\n True: \"The value of the Growth Patience Factor for the supplied parameter values satisfies the Perfect Foresight Growth Impatience Condition.\",\n False: \"The value of the Growth Patience Factor for the supplied parameter values fails the Perfect Foresight Growth Impatience Condition; the GPFPF is: {0.GPFPF}\",\n }\n\n verbose_messages = {\n True: \" Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income will fall indefinitely.\",\n False: \" Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income is expected to grow toward infinity.\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkRIC(self, verbose=None):\n \"\"\"\n Evaluate and report on the Return Impatience Condition\n \"\"\"\n\n self.RPF = self.thorn / self.Rfree\n\n name = \"RIC\"\n test = lambda agent: self.RPF < 1\n\n messages = {\n True: \"The value of the Return Patience Factor for the supplied parameter values satisfies the Return Impatience Condition.\",\n False: \"The value of the Return Patience Factor 
for the supplied parameter values fails the Return Impatience Condition; the factor is {0.RPF}\",\n }\n\n verbose_messages = {\n True: \" Therefore, the limiting consumption function is not c(m)=0 for all m\",\n False: \" Therefore, if the FHWC is satisfied, the limiting consumption function is c(m)=0 for all m.\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkFHWC(self, verbose=None):\n \"\"\"\n Evaluate and report on the Finite Human Wealth Condition\n \"\"\"\n\n self.FHWF = self.PermGroFac[0] / self.Rfree\n self.cNrmPDV = 1.0 / (1.0 - self.thorn / self.Rfree)\n\n name = \"FHWC\"\n test = lambda agent: self.FHWF < 1\n\n messages = {\n True: \"The Finite Human wealth factor value for the supplied parameter values satisfies the Finite Human Wealth Condition.\",\n False: \"The given type violates the Finite Human Wealth Condition; the Finite Human wealth factor value is {0.FHWF}\",\n }\n\n verbose_messages = {\n True: \" Therefore, the limiting consumption function is not c(m)=Infinity\\nand human wealth normalized by permanent income is {0.hNrm}\\nand the PDV of future consumption growth is {0.cNrmPDV}\",\n False: \" Therefore, the limiting consumption function is c(m)=Infinity for all m unless the RIC is also violated. If both FHWC and RIC fail and the consumer faces a liquidity constraint, the limiting consumption function is nondegenerate but has a limiting slope of 0. (https://econ-ark.github.io/BufferStockTheory#PFGICHoldsFHWCFailsRICFailsDiscuss)\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose)\n\n def checkConditions(self, verbose=None):\n \"\"\"\n This method checks whether the instance's type satisfies the\n Absolute Impatience Condition (AIC),\n the Return Impatience Condition (RIC),\n the Finite Human Wealth Condition (FHWC) and the perfect foresight\n model's version of the Finite Value of the Growth Impatience Condition (GICPF) and\n Autarky Condition (FVACPF). Depending on the configuration of parameter values, some\n combination of these conditions must be satisfied in order for the problem to have\n a nondegenerate solution. To check which conditions are required, in the verbose mode\n a reference to the relevant theoretical literature is made.\n\n\n Parameters\n ----------\n verbose : boolean\n Specifies different levels of verbosity of feedback. When False, it only reports whether the\n instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.\n the factor values for all conditions.\n\n Returns\n -------\n None\n \"\"\"\n self.conditions = {}\n\n self.violated = False\n\n # This method only checks for the conditions for infinite horizon models\n # with a 1 period cycle. 
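That is, the checks run only when cycles == 0 and T_cycle == 1.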
If these conditions are not met, we exit early.\n if self.cycles != 0 or self.T_cycle > 1:\n return\n\n self.thorn = (self.Rfree * self.DiscFac * self.LivPrb[0]) ** (1 / self.CRRA)\n\n verbose = self.verbose if verbose is None else verbose\n self.checkAIC(verbose)\n self.checkGICPF(verbose)\n self.checkRIC(verbose)\n self.checkFHWC(verbose)\n\n if hasattr(self, \"BoroCnstArt\") and self.BoroCnstArt is not None:\n self.violated = not self.conditions[\"RIC\"]\n else:\n self.violated = not self.conditions[\"RIC\"] or not self.conditions[\"FHWC\"]\n\n\n# Make a dictionary to specify an idiosyncratic income shocks consumer\ninit_idiosyncratic_shocks = dict(\n init_perfect_foresight,\n **{\n # assets above grid parameters\n \"aXtraMin\": 0.001, # Minimum end-of-period \"assets above minimum\" value\n \"aXtraMax\": 20, # Maximum end-of-period \"assets above minimum\" value\n \"aXtraNestFac\": 3, # Exponential nesting factor when constructing \"assets above minimum\" grid\n \"aXtraCount\": 48, # Number of points in the grid of \"assets above minimum\"\n \"aXtraExtra\": [\n None\n ], # Some other value of \"assets above minimum\" to add to the grid, not used\n # Income process variables\n \"PermShkStd\": [0.1], # Standard deviation of log permanent income shocks\n \"PermShkCount\": 7, # Number of points in discrete approximation to permanent income shocks\n \"TranShkStd\": [0.1], # Standard deviation of log transitory income shocks\n \"TranShkCount\": 7, # Number of points in discrete approximation to transitory income shocks\n \"UnempPrb\": 0.05, # Probability of unemployment while working\n \"UnempPrbRet\": 0.005, # Probability of \"unemployment\" while retired\n \"IncUnemp\": 0.3, # Unemployment benefits replacement rate\n \"IncUnempRet\": 0.0, # \"Unemployment\" benefits when retired\n \"BoroCnstArt\": 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets\n \"tax_rate\": 0.0, # Flat income tax rate\n \"T_retire\": 0, # Period of retirement (0 --> no retirement)\n \"vFuncBool\": False, # Whether to calculate the value function during solution\n \"CubicBool\": False, # Use cubic spline interpolation when True, linear interpolation when False\n }\n)\n\n\nclass IndShockConsumerType(PerfForesightConsumerType):\n \"\"\"\n A consumer type with idiosyncratic shocks to permanent and transitory income.\n His problem is defined by a sequence of income distributions, survival prob-\n abilities, and permanent income growth rates, as well as time invariant values\n for risk aversion, discount factor, the interest rate, the grid of end-of-\n period assets, and an artificial borrowing constraint.\n \"\"\"\n\n time_inv_ = PerfForesightConsumerType.time_inv_ + [\n \"BoroCnstArt\",\n \"vFuncBool\",\n \"CubicBool\",\n ]\n time_inv_.remove(\n \"MaxKinks\"\n ) # This is in the PerfForesight model but not ConsIndShock\n shock_vars_ = [\"PermShkNow\", \"TranShkNow\"]\n\n def __init__(self, cycles=1, verbose=1, quiet=False, **kwds):\n \"\"\"\n Instantiate a new ConsumerType with given data.\n See ConsumerParameters.init_idiosyncratic_shocks for a dictionary of\n the keywords that should be passed to the constructor.\n\n Parameters\n ----------\n cycles : int\n Number of times the sequence of periods should be solved.\n\n Returns\n -------\n None\n \"\"\"\n\n params = init_idiosyncratic_shocks.copy()\n params.update(kwds)\n\n # Initialize a basic AgentType\n PerfForesightConsumerType.__init__(\n self, cycles=cycles, verbose=verbose, quiet=quiet, **params\n )\n\n # Add consumer-type 
specific objects, copying to create independent versions\n if (not self.CubicBool) and (not self.vFuncBool):\n solver = ConsIndShockSolverBasic\n else: # Use the \"advanced\" solver if either is requested\n solver = ConsIndShockSolver\n self.solveOnePeriod = makeOnePeriodOOSolver(solver)\n\n self.update() # Make assets grid, income process, terminal solution\n\n def updateIncomeProcess(self):\n \"\"\"\n Updates this agent's income process based on his own attributes.\n\n Parameters\n ----------\n none\n\n Returns:\n -----------\n none\n \"\"\"\n (\n IncomeDstn,\n PermShkDstn,\n TranShkDstn,\n ) = self.constructLognormalIncomeProcessUnemployment()\n self.IncomeDstn = IncomeDstn\n self.PermShkDstn = PermShkDstn\n self.TranShkDstn = TranShkDstn\n self.addToTimeVary(\"IncomeDstn\", \"PermShkDstn\", \"TranShkDstn\")\n\n def updateAssetsGrid(self):\n \"\"\"\n Updates this agent's end-of-period assets grid by constructing a multi-\n exponentially spaced grid of aXtra values.\n\n Parameters\n ----------\n none\n\n Returns\n -------\n none\n \"\"\"\n aXtraGrid = constructAssetsGrid(self)\n self.aXtraGrid = aXtraGrid\n self.addToTimeInv(\"aXtraGrid\")\n\n def update(self):\n \"\"\"\n Update the income process, the assets grid, and the terminal solution.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n self.updateIncomeProcess()\n self.updateAssetsGrid()\n self.updateSolutionTerminal()\n\n def resetRNG(self):\n \"\"\"\n Reset the RNG behavior of this type. This method is called automatically\n by initializeSim(), ensuring that each simulation run uses the same sequence\n of random shocks; this is necessary for structural estimation to work.\n This method extends AgentType.resetRNG() to also reset elements of IncomeDstn.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n PerfForesightConsumerType.resetRNG(self)\n\n # Reset IncomeDstn if it exists (it might not because resetRNG is called at init)\n if hasattr(self, \"IncomeDstn\"):\n for dstn in self.IncomeDstn:\n dstn.reset()\n\n def getShocks(self):\n \"\"\"\n Gets permanent and transitory income shocks for this period. Samples from IncomeDstn for\n each period in the cycle.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays\n TranShkNow = np.zeros(self.AgentCount)\n newborn = self.t_age == 0\n for t in range(self.T_cycle):\n these = t == self.t_cycle\n N = np.sum(these)\n if N > 0:\n IncomeDstnNow = self.IncomeDstn[\n t - 1\n ] # set current income distribution\n PermGroFacNow = self.PermGroFac[t - 1] # and permanent growth factor\n # Get random draws of income shocks from the discrete distribution\n IncShks = IncomeDstnNow.drawDiscrete(N)\n PermShkNow[these] = (\n IncShks[0, :] * PermGroFacNow\n ) # permanent \"shock\" includes expected growth\n TranShkNow[these] = IncShks[1, :]\n\n # That procedure used the *last* period in the sequence for newborns, but that's not right\n # Redraw shocks for newborns, using the *first* period in the sequence. 
Approximation.\n N = np.sum(newborn)\n if N > 0:\n these = newborn\n IncomeDstnNow = self.IncomeDstn[0] # set current income distribution\n PermGroFacNow = self.PermGroFac[0] # and permanent growth factor\n\n # Get random draws of income shocks from the discrete distribution\n EventDraws = IncomeDstnNow.draw_events(N)\n PermShkNow[these] = (\n IncomeDstnNow.X[0][EventDraws] * PermGroFacNow\n ) # permanent \"shock\" includes expected growth\n TranShkNow[these] = IncomeDstnNow.X[1][EventDraws]\n # PermShkNow[newborn] = 1.0\n TranShkNow[newborn] = 1.0\n\n # Store the shocks in self\n self.EmpNow = np.ones(self.AgentCount, dtype=bool)\n self.EmpNow[TranShkNow == self.IncUnemp] = False\n self.shocks[\"PermShkNow\"] = PermShkNow\n self.shocks[\"TranShkNow\"] = TranShkNow\n\n def calcBoundingValues(self):\n \"\"\"\n Calculate human wealth plus minimum and maximum MPC in an infinite\n horizon model with only one period repeated indefinitely. Store results\n as attributes of self. Human wealth is the present discounted value of\n expected future income after receiving income this period, ignoring mort-\n ality (because your income matters to you only if you are still alive).\n The maximum MPC is the limit of the MPC as m --> mNrmMin. The\n minimum MPC is the limit of the MPC as m --> infty.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n # Unpack the income distribution and get average and worst outcomes\n PermShkValsNext = self.IncomeDstn[0][1]\n TranShkValsNext = self.IncomeDstn[0][2]\n ShkPrbsNext = self.IncomeDstn[0][0]\n ExIncNext = np.dot(ShkPrbsNext, PermShkValsNext * TranShkValsNext)\n PermShkMinNext = np.min(PermShkValsNext)\n TranShkMinNext = np.min(TranShkValsNext)\n WorstIncNext = PermShkMinNext * TranShkMinNext\n WorstIncPrb = np.sum(\n ShkPrbsNext[(PermShkValsNext * TranShkValsNext) == WorstIncNext]\n )\n\n # Calculate human wealth and the infinite horizon natural borrowing constraint\n hNrm = (ExIncNext * self.PermGroFac[0] / self.Rfree) / (\n 1.0 - self.PermGroFac[0] / self.Rfree\n )\n temp = self.PermGroFac[0] * PermShkMinNext / self.Rfree\n BoroCnstNat = -TranShkMinNext * temp / (1.0 - temp)\n\n PatFac = (self.DiscFac * self.LivPrb[0] * self.Rfree) ** (\n 1.0 / self.CRRA\n ) / self.Rfree\n if BoroCnstNat < self.BoroCnstArt:\n MPCmax = 1.0 # if natural borrowing constraint is overridden by artificial one, MPCmax is 1\n else:\n MPCmax = 1.0 - WorstIncPrb ** (1.0 / self.CRRA) * PatFac\n MPCmin = 1.0 - PatFac\n\n # Store the results as attributes of self\n self.hNrm = hNrm\n self.MPCmin = MPCmin\n self.MPCmax = MPCmax\n\n def makeEulerErrorFunc(self, mMax=100, approx_inc_dstn=True):\n \"\"\"\n Creates a \"normalized Euler error\" function for this instance, mapping\n from market resources to \"consumption error per dollar of consumption.\"\n Stores result in attribute eulerErrorFunc as an interpolated function.\n Has option to use approximate income distribution stored in self.IncomeDstn\n or to use a (temporary) very dense approximation.\n\n Only works on (one period) infinite horizon models at this time, will\n be generalized later.\n\n Parameters\n ----------\n mMax : float\n Maximum normalized market resources for the Euler error function.\n approx_inc_dstn : Boolean\n Indicator for whether to use the approximate discrete income distri-\n bution stored in self.IncomeDstn[0], or to use a very accurate\n discrete approximation instead. 
When True, uses approximation in\n IncomeDstn; when False, makes and uses a very dense approximation.\n\n Returns\n -------\n None\n \"\"\"\n # Get the income distribution (or make a very dense one)\n if approx_inc_dstn:\n IncomeDstn = self.IncomeDstn[0]\n else:\n TranShkDstn = MeanOneLogNormal(sigma=self.TranShkStd[0]).approx(\n N=200, tail_N=50, tail_order=1.3, tail_bound=[0.05, 0.95]\n )\n TranShkDstn = addDiscreteOutcomeConstantMean(\n TranShkDstn, self.UnempPrb, self.IncUnemp\n )\n PermShkDstn = MeanOneLogNormal(sigma=self.PermShkStd[0]).approx(\n N=200, tail_N=50, tail_order=1.3, tail_bound=[0.05, 0.95]\n )\n IncomeDstn = combineIndepDstns(PermShkDstn, TranShkDstn)\n\n # Make a grid of market resources\n mNowMin = self.solution[0].mNrmMin + 10 ** (\n -15\n ) # add tiny bit to get around 0/0 problem\n mNowMax = mMax\n mNowGrid = np.linspace(mNowMin, mNowMax, 1000)\n\n # Get the consumption function this period and the marginal value function\n # for next period. Note that this part assumes a one period cycle.\n cFuncNow = self.solution[0].cFunc\n vPfuncNext = self.solution[0].vPfunc\n\n # Calculate consumption this period at each gridpoint (and assets)\n cNowGrid = cFuncNow(mNowGrid)\n aNowGrid = mNowGrid - cNowGrid\n\n # Tile the grids for fast computation\n ShkCount = IncomeDstn[0].size\n aCount = aNowGrid.size\n aNowGrid_tiled = np.tile(aNowGrid, (ShkCount, 1))\n PermShkVals_tiled = (np.tile(IncomeDstn[1], (aCount, 1))).transpose()\n TranShkVals_tiled = (np.tile(IncomeDstn[2], (aCount, 1))).transpose()\n ShkPrbs_tiled = (np.tile(IncomeDstn[0], (aCount, 1))).transpose()\n\n # Calculate marginal value next period for each gridpoint and each shock\n mNextArray = (\n self.Rfree / (self.PermGroFac[0] * PermShkVals_tiled) * aNowGrid_tiled\n + TranShkVals_tiled\n )\n vPnextArray = vPfuncNext(mNextArray)\n\n # Calculate expected marginal value and implied optimal consumption\n ExvPnextGrid = (\n self.DiscFac\n * self.Rfree\n * self.LivPrb[0]\n * self.PermGroFac[0] ** (-self.CRRA)\n * np.sum(\n PermShkVals_tiled ** (-self.CRRA) * vPnextArray * ShkPrbs_tiled, axis=0\n )\n )\n cOptGrid = ExvPnextGrid ** (\n -1.0 / self.CRRA\n ) # This is the 'Endogenous Gridpoints' step\n\n # Calculate Euler error and store an interpolated function\n EulerErrorNrmGrid = (cNowGrid - cOptGrid) / cOptGrid\n eulerErrorFunc = LinearInterp(mNowGrid, EulerErrorNrmGrid)\n self.eulerErrorFunc = eulerErrorFunc\n\n def preSolve(self):\n # AgentType.preSolve(self)\n # Update all income process variables to match any attributes that might\n # have been changed since `__init__` or `solve()` was last called.\n # self.updateIncomeProcess()\n self.updateSolutionTerminal()\n if not self.quiet:\n self.checkConditions(verbose=self.verbose)\n\n def checkGICInd(self, verbose=None):\n \"\"\"\n Check Individual Growth Impatience Factor.\n \"\"\"\n self.GPFInd = self.thorn / (\n self.PermGroFac[0] * self.InvEPermShkInv\n ) # [url]/#GICI\n\n name = \"GIC\"\n test = lambda agent: agent.GPFInd <= 1\n\n messages = {\n True: \"\\nThe value of the Individual Growth Impatience Factor for the supplied parameter values satisfies the Individual Growth Impatience Condition; the value of the GPFInd is: {0.GPFInd}\",\n False: \"\\nThe given parameter values violate the Individual Growth Impatience Condition; the GPFInd is: {0.GPFInd}\",\n }\n\n verbose_messages = {\n True: \" Therefore, a target level of the individual market resources ratio m exists (see {0.url}/#onetarget for more).\\n\",\n False: \" Therefore, a target ratio of individual 
market resources to individual permanent income does not exist. (see {0.url}/#onetarget for more).\\n\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkCIGAgg(self, verbose=None):\n name = \"GICAgg\"\n test = lambda agent: agent.GPFAgg <= 1\n\n messages = {\n True: \"\\nThe value of the Aggregate Growth Impatience Factor for the supplied parameter values satisfies the Aggregate Growth Impatience Condition; the value of the GPFAgg is: {0.GPFAgg}\",\n False: \"\\nThe given parameter values violate the Aggregate Growth Impatience Condition; the GPFAgg is: {0.GPFAgg}\",\n }\n\n verbose_messages = {\n True: \" Therefore, a target level of the ratio of aggregate market resources to aggregate permanent income exists.\\n\", # (see {0.url}/#WRIC for more).',\n False: \" Therefore, a target ratio of aggregate resources to aggregate permanent income may not exist.\\n\", # (see {0.url}/#WRIC for more).'\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkWRIC(self, verbose=None):\n \"\"\"\n Evaluate and report on the Weak Return Impatience Condition\n [url]/#WRPF modified to incorporate LivPrb\n \"\"\"\n self.WRPF = (\n (self.UnempPrb ** (1 / self.CRRA))\n * (self.Rfree * self.DiscFac * self.LivPrb[0]) ** (1 / self.CRRA)\n / self.Rfree\n )\n\n name = \"WRIC\"\n test = lambda agent: agent.WRPF <= 1\n\n messages = {\n True: \"\\nThe Weak Return Impatience Factor value for the supplied parameter values satisfies the Weak Return Impatience Condition; the WRPF is {0.WRPF}.\",\n False: \"\\nThe Weak Return Impatience Factor value for the supplied parameter values fails the Weak Return Impatience Condition; the WRPF is {0.WRPF} (see {0.url}/#WRIC for more).\",\n }\n\n verbose_messages = {\n True: \" Therefore, a nondegenerate solution exists if the FVAC is also satisfied. (see {0.url}/#WRIC for more) \\n\",\n False: \" Therefore, a nondegenerate solution is not available (see {0.url}/#WRIC for more). 
\\n\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkFVAC(self, verbose=None):\n \"\"\"\n Evaluate and report on the Finite Value of Autarky Condition\n Hyperlink to paper: [url]/#Autarky-Value\n \"\"\"\n EpShkuInv = np.dot(\n self.PermShkDstn[0].pmf, self.PermShkDstn[0].X ** (1 - self.CRRA)\n )\n if self.CRRA != 1.0:\n uInvEpShkuInv = EpShkuInv ** (\n 1 / (1 - self.CRRA)\n ) # The term that gives a utility-consequence-adjusted utility growth\n else:\n uInvEpShkuInv = 1.0\n\n self.uInvEpShkuInv = uInvEpShkuInv\n\n self.FVAF = self.LivPrb[0] * self.DiscFac * self.uInvEpShkuInv\n\n name = \"FVAC\"\n test = lambda agent: agent.FVAF <= 1\n\n messages = {\n True: \"\\nThe Finite Value of Autarky Factor (FVAV) for the supplied parameter values satisfies the Finite Value of Autarky Condition; the FVAF is {0.FVAF}\",\n False: \"\\nThe Finite Value of Autarky Factor (FVAV) for the supplied parameter values fails the Finite Value of Autarky Condition; the FVAF is {0.FVAF}\",\n }\n\n verbose_messages = {\n True: \" Therefore, a nondegenerate solution exists if the WRIC also holds; see {0.url}/#Conditions-Under-Which-the-Problem-Defines-a-Contraction-Mapping\\n\",\n False: \" Therefore, a nondegenerate solution is not available (see {0.url}/#Conditions-Under-Which-the-Problem-Defines-a-Contraction-Mapping\\n\",\n }\n verbose = self.verbose if verbose is None else verbose\n self.checkCondition(name, test, messages, verbose, verbose_messages)\n\n def checkConditions(self, verbose=None):\n \"\"\"\n This method checks whether the instance's type satisfies the Absolute Impatience Condition (AIC), Weak Return\n Impatience Condition (WRIC), Finite Human Wealth Condition (FHWC) and Finite Value of\n Autarky Condition (FVAC). When combinations of these conditions are satisfied, the\n solution to the problem exhibits different characteristics. (For an exposition of the\n conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)\n\n Parameters\n ----------\n verbose : boolean\n Specifies different levels of verbosity of feedback. When False, it only reports whether the\n instance's type fails to satisfy a particular condition. 
When True, it reports all results, i.e.\n the factor values for all conditions.\n\n Returns\n -------\n None\n \"\"\"\n self.conditions = {}\n\n self.violated = False # PerfForesightConsumerType.checkConditions(self, verbose=False, verbose_reference=False)\n\n if self.cycles != 0 or self.T_cycle > 1:\n return\n\n # For theory, see hyperlink targets to expressions in\n # url=http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory\n # For example, the hyperlink to the relevant section of the paper\n self.url = \"https://llorracc.github.io/BufferStockTheory\"\n # would be referenced below as:\n # [url]/#Uncertainty-Modified-Conditions\n\n self.InvPermShkDstn = deepcopy(self.PermShkDstn)\n\n self.InvPermShkDstn[0].X = 1 / self.PermShkDstn[0].X\n self.EPermShkInv = np.dot(\n self.InvPermShkDstn[0].pmf, 1 / self.PermShkDstn[0].X\n ) # $\\Ex_{t}[\\psi^{-1}_{t+1}]$ (in first eqn in sec)\n # [url]/#Pat, adjusted to include mortality\n\n self.InvEPermShkInv = (\n 1 / self.EPermShkInv\n ) # $\\underline{\\psi}$ in the paper (\\bar{\\isp} in private version)\n self.PermGroFacAdj = self.PermGroFac[0] * self.InvEPermShkInv # [url]/#PGroAdj\n\n self.thorn = ((self.Rfree * self.DiscFac)) ** (1 / self.CRRA)\n\n # self.Rnorm = self.Rfree*EPermShkInv/(self.PermGroFac[0]*self.LivPrb[0])\n self.GPFPF = self.thorn / (self.PermGroFac[0]) # [url]/#GPF\n # Lower bound of aggregate wealth growth if all inheritances squandered\n\n self.GPFAgg = self.thorn * self.LivPrb[0] / self.PermGroFac[0]\n\n self.DiscFacGPFPFMax = ((self.PermGroFac[0]) ** (self.CRRA)) / (\n self.Rfree\n ) # DiscFac at growth impatience knife edge\n self.DiscFacGPFIndMax = (\n (self.PermGroFac[0] * self.InvEPermShkInv) ** (self.CRRA)\n ) / (\n self.Rfree\n ) # DiscFac at growth impatience knife edge\n self.DiscFacGPFAggMax = ((self.PermGroFac[0]) ** (self.CRRA)) / (\n self.Rfree * self.LivPrb[0]\n ) # DiscFac at growth impatience knife edge\n verbose = self.verbose if verbose is None else verbose\n\n # self.checkGICPF(verbose)\n self.checkGICInd(verbose)\n self.checkCIGAgg(verbose)\n self.checkWRIC(verbose)\n self.checkFVAC(verbose)\n\n self.violated = not self.conditions[\"WRIC\"] or not self.conditions[\"FVAC\"]\n\n if self.violated:\n _log.warning(\n '\\n[!] 
For more information on the conditions, see Tables 3 and 4 in \"Theoretical Foundations of Buffer Stock Saving\" at '\n + self.url\n + \"/#Factors-Defined-And-Compared\"\n )\n\n _log.warning(\"GPFPF = %2.6f \" % (self.GPFPF))\n _log.warning(\"GPFInd = %2.6f \" % (self.GPFInd))\n _log.warning(\"GPFAgg = %2.6f \" % (self.GPFAgg))\n _log.warning(\"Thorn = APF = %2.6f \" % (self.thorn))\n _log.warning(\"PermGroFacAdj = %2.6f \" % (self.PermGroFacAdj))\n _log.warning(\"uInvEpShkuInv = %2.6f \" % (self.uInvEpShkuInv))\n _log.warning(\"FVAF = %2.6f \" % (self.FVAF))\n _log.warning(\"WRPF = %2.6f \" % (self.WRPF))\n _log.warning(\"DiscFacGPFIndMax = %2.6f \" % (self.DiscFacGPFIndMax))\n _log.warning(\"DiscFacGPFAggMax = %2.6f \" % (self.DiscFacGPFAggMax))\n\n def Ex_Mtp1_over_Ex_Ptp1(self, mRat):\n cRat = self.solution[-1].cFunc(mRat)\n aRat = mRat - cRat\n Ex_Ptp1 = PermGroFac[0]\n Ex_bLev_tp1 = aRat * self.Rfree\n Ex_Mtp1 = Ex_bLev_tp1\n return Ex_Mtp1 / Ex_Ptp1\n\n def Ex_mtp1(self, mRat):\n cRat = self.solution[-1].cFunc(mRat)\n aRat = mRat - cRat\n Ex_bRat_tp1 = aRat * self.Rfree * self.EPermShkInv / self.PermGroFac[0]\n Ex_Mtp1 = (Ex_bRat_tp1 + 1) * Ex_Ptp1 # mean TranShk and PermShk are 1\n return Ex_Mtp1 / Ex_Ptp1\n\n def calcTargets(self):\n \"\"\"\n If the problem is one that satisfies the conditions required for target ratios of different\n variables to permanent income to exist, and has been solved to within the self-defined\n tolerance, this method calculates the target values of market resources, consumption,\n and assets.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n infinite_horizon = cycles_left == 0\n if not infinite_horizon:\n _log.warning(\n \"The calcTargets method works only for infinite horizon models.\"\n )\n return\n\n # To be written.\n # Defining:\n ## Rnorm = Rfree/(PermGroFac[0]*PermShk)\n ## EPermShkInv = E[PermShk**(-1)]\n ## InvEPermShkInv = 1/EPermShkInv\n ## ExRnorm = E[Rfree/(PermGroFac[0]*PermShk)] = Rfree EPermShkInv / PermGroFac[0]\n ## InvExRnorm = 1/ExRnorm\n ## The \"sustainable consumption\" locus is given by\n # cSust = InvExRnorm + m*(1-InvExRnorm)\n\n # The target level of m, mTarg, will be the value such that\n # cSust[m] = cFunc[m]\n\n\n\n # ========================================================\n # = Functions for generating discrete income processes and\n # simulated income shocks =\n # ========================================================\n\n def constructLognormalIncomeProcessUnemployment(self):\n \"\"\"\n Generates a list of discrete approximations to the income process for each\n life period, from end of life to beginning of life. Permanent shocks are mean\n one lognormally distributed with standard deviation PermShkStd[t] during the\n working life, and degenerate at 1 in the retirement period. Transitory shocks\n are mean one lognormally distributed with a point mass at IncUnemp with\n probability UnempPrb while working; they are mean one with a point mass at\n IncUnempRet with probability UnempPrbRet. 
Retirement occurs\n after t=T_retire periods of working.\n\n Note 1: All time in this function runs forward, from t=0 to t=T\n\n Note 2: All parameters are passed as attributes of the input parameters.\n\n Parameters (passed as attributes of the input parameters)\n ----------\n PermShkStd : [float]\n List of standard deviations in log permanent income uncertainty during\n the agent's life.\n PermShkCount : int\n The number of approximation points to be used in the discrete approxima-\n tion to the permanent income shock distribution.\n TranShkStd : [float]\n List of standard deviations in log transitory income uncertainty during\n the agent's life.\n TranShkCount : int\n The number of approximation points to be used in the discrete approxima-\n tion to the permanent income shock distribution.\n UnempPrb : float\n The probability of becoming unemployed during the working period.\n UnempPrbRet : float\n The probability of not receiving typical retirement income when retired.\n T_retire : int\n The index value for the final working period in the agent's life.\n If T_retire <= 0 then there is no retirement.\n IncUnemp : float\n Transitory income received when unemployed.\n IncUnempRet : float\n Transitory income received while \"unemployed\" when retired.\n T_cycle : int\n Total number of non-terminal periods in the consumer's sequence of periods.\n\n Returns\n -------\n IncomeDstn : [[np.array]]\n A list with T_cycle elements, each of which is a list of three arrays\n representing a discrete approximation to the income process in a period.\n Order: probabilities, permanent shocks, transitory shocks.\n PermShkDstn : [[np.array]]\n A list with T_cycle elements, each of which is a list of two arrays\n representing a discrete approximation to the permanent income shocks.\n TranShkDstn : [[np.array]]\n A list with T_cycle elements, each of which is a list of two arrays\n representing a discrete approximation to the transitory income shocks.\n \"\"\"\n # Unpack the parameters from the input\n PermShkStd = self.PermShkStd\n PermShkCount = self.PermShkCount\n TranShkStd = self.TranShkStd\n TranShkCount = self.TranShkCount\n T_cycle = self.T_cycle\n T_retire = self.T_retire\n UnempPrb = self.UnempPrb\n IncUnemp = self.IncUnemp\n UnempPrbRet = self.UnempPrbRet\n IncUnempRet = self.IncUnempRet\n\n IncomeDstn = [] # Discrete approximations to income process in each period\n PermShkDstn = [] # Discrete approximations to permanent income shocks\n TranShkDstn = [] # Discrete approximations to transitory income shocks\n\n # Fill out a simple discrete RV for retirement, with value 1.0 (mean of shocks)\n # in normal times; value 0.0 in \"unemployment\" times with small prob.\n if T_retire > 0:\n if UnempPrbRet > 0:\n PermShkValsRet = np.array(\n [1.0, 1.0]\n ) # Permanent income is deterministic in retirement (2 states for temp income shocks)\n TranShkValsRet = np.array(\n [\n IncUnempRet,\n (1.0 - UnempPrbRet * IncUnempRet) / (1.0 - UnempPrbRet),\n ]\n )\n ShkPrbsRet = np.array([UnempPrbRet, 1.0 - UnempPrbRet])\n else:\n PermShkValsRet = np.array([1.0])\n TranShkValsRet = np.array([1.0])\n ShkPrbsRet = np.array([1.0])\n IncomeDstnRet = DiscreteDistribution(\n ShkPrbsRet,\n [PermShkValsRet, TranShkValsRet],\n seed=self.RNG.randint(0, 2 ** 31 - 1),\n )\n\n # Loop to fill in the list of IncomeDstn random variables.\n for t in range(T_cycle): # Iterate over all periods, counting forward\n\n if T_retire > 0 and t >= T_retire:\n # Then we are in the \"retirement period\" and add a retirement income object.\n 
IncomeDstn.append(deepcopy(IncomeDstnRet))\n PermShkDstn.append([np.array([1.0]), np.array([1.0])])\n TranShkDstn.append([ShkPrbsRet, TranShkValsRet])\n else:\n # We are in the \"working life\" periods.\n TranShkDstn_t = MeanOneLogNormal(sigma=TranShkStd[t]).approx(\n TranShkCount, tail_N=0\n )\n if UnempPrb > 0:\n TranShkDstn_t = addDiscreteOutcomeConstantMean(\n TranShkDstn_t, p=UnempPrb, x=IncUnemp\n )\n PermShkDstn_t = MeanOneLogNormal(sigma=PermShkStd[t]).approx(\n PermShkCount, tail_N=0\n )\n ### REPLACE\n ###REPLACE\n IncomeDstn.append(\n combineIndepDstns(\n PermShkDstn_t,\n TranShkDstn_t,\n seed=self.RNG.randint(0, 2 ** 31 - 1),\n )\n ) # mix the independent distributions\n PermShkDstn.append(PermShkDstn_t)\n TranShkDstn.append(TranShkDstn_t)\n return IncomeDstn, PermShkDstn, TranShkDstn\n\n\n# Make a dictionary to specify a \"kinked R\" idiosyncratic shock consumer\ninit_kinked_R = dict(\n init_idiosyncratic_shocks,\n **{\n \"Rboro\": 1.20, # Interest factor on assets when borrowing, a < 0\n \"Rsave\": 1.02, # Interest factor on assets when saving, a > 0\n \"BoroCnstArt\": None, # kinked R is a bit silly if borrowing not allowed\n \"CubicBool\": True, # kinked R is now compatible with linear cFunc and cubic cFunc\n \"aXtraCount\": 48, # ...so need lots of extra gridpoints to make up for it\n }\n)\ndel init_kinked_R[\"Rfree\"] # get rid of constant interest factor\n\n\nclass KinkedRconsumerType(IndShockConsumerType):\n \"\"\"\n A consumer type that faces idiosyncratic shocks to income and has a different\n interest factor on saving vs borrowing. Extends IndShockConsumerType, with\n very small changes. Solver for this class is currently only compatible with\n linear spline interpolation.\n \"\"\"\n\n time_inv_ = copy(IndShockConsumerType.time_inv_)\n time_inv_.remove(\"Rfree\")\n time_inv_ += [\"Rboro\", \"Rsave\"]\n\n def __init__(self, cycles=1, **kwds):\n \"\"\"\n Instantiate a new ConsumerType with given data.\n See ConsumerParameters.init_kinked_R for a dictionary of\n the keywords that should be passed to the constructor.\n\n Parameters\n ----------\n cycles : int\n Number of times the sequence of periods should be solved.\n\n Returns\n -------\n None\n \"\"\"\n params = init_kinked_R.copy()\n params.update(kwds)\n\n # Initialize a basic AgentType\n PerfForesightConsumerType.__init__(self, cycles=cycles, **params)\n\n # Add consumer-type specific objects, copying to create independent versions\n self.solveOnePeriod = makeOnePeriodOOSolver(ConsKinkedRsolver)\n self.update() # Make assets grid, income process, terminal solution\n\n def preSolve(self):\n # AgentType.preSolve(self)\n self.updateSolutionTerminal()\n\n def calcBoundingValues(self):\n \"\"\"\n Calculate human wealth plus minimum and maximum MPC in an infinite\n horizon model with only one period repeated indefinitely. Store results\n as attributes of self. Human wealth is the present discounted value of\n expected future income after receiving income this period, ignoring mort-\n ality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The\n minimum MPC is the limit of the MPC as m --> infty. 
This version deals\n with the different interest rates on borrowing vs saving.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n # Unpack the income distribution and get average and worst outcomes\n PermShkValsNext = self.IncomeDstn[0][1]\n TranShkValsNext = self.IncomeDstn[0][2]\n ShkPrbsNext = self.IncomeDstn[0][0]\n ExIncNext = np.dot(ShkPrbsNext, PermShkValsNext * TranShkValsNext)\n PermShkMinNext = np.min(PermShkValsNext)\n TranShkMinNext = np.min(TranShkValsNext)\n WorstIncNext = PermShkMinNext * TranShkMinNext\n WorstIncPrb = np.sum(\n ShkPrbsNext[(PermShkValsNext * TranShkValsNext) == WorstIncNext]\n )\n\n # Calculate human wealth and the infinite horizon natural borrowing constraint\n hNrm = (ExIncNext * self.PermGroFac[0] / self.Rsave) / (\n 1.0 - self.PermGroFac[0] / self.Rsave\n )\n temp = self.PermGroFac[0] * PermShkMinNext / self.Rboro\n BoroCnstNat = -TranShkMinNext * temp / (1.0 - temp)\n\n PatFacTop = (self.DiscFac * self.LivPrb[0] * self.Rsave) ** (\n 1.0 / self.CRRA\n ) / self.Rsave\n PatFacBot = (self.DiscFac * self.LivPrb[0] * self.Rboro) ** (\n 1.0 / self.CRRA\n ) / self.Rboro\n if BoroCnstNat < self.BoroCnstArt:\n MPCmax = 1.0 # if natural borrowing constraint is overridden by artificial one, MPCmax is 1\n else:\n MPCmax = 1.0 - WorstIncPrb ** (1.0 / self.CRRA) * PatFacBot\n MPCmin = 1.0 - PatFacTop\n\n # Store the results as attributes of self\n self.hNrm = hNrm\n self.MPCmin = MPCmin\n self.MPCmax = MPCmax\n\n def makeEulerErrorFunc(self, mMax=100, approx_inc_dstn=True):\n \"\"\"\n Creates a \"normalized Euler error\" function for this instance, mapping\n from market resources to \"consumption error per dollar of consumption.\"\n Stores result in attribute eulerErrorFunc as an interpolated function.\n Has option to use approximate income distribution stored in self.IncomeDstn\n or to use a (temporary) very dense approximation.\n\n SHOULD BE INHERITED FROM ConsIndShockModel\n\n Parameters\n ----------\n mMax : float\n Maximum normalized market resources for the Euler error function.\n approx_inc_dstn : Boolean\n Indicator for whether to use the approximate discrete income distri-\n bution stored in self.IncomeDstn[0], or to use a very accurate\n discrete approximation instead. When True, uses approximation in\n IncomeDstn; when False, makes and uses a very dense approximation.\n\n Returns\n -------\n None\n \"\"\"\n raise NotImplementedError()\n\n def getRfree(self):\n \"\"\"\n Returns an array of size self.AgentCount with self.Rboro or self.Rsave in each entry, based\n on whether self.aNrmNow >< 0.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n RfreeNow : np.array\n Array of size self.AgentCount with risk free interest rate for each agent.\n \"\"\"\n RfreeNow = self.Rboro * np.ones(self.AgentCount)\n RfreeNow[self.state_prev['aNrmNow'] > 0] = self.Rsave\n return RfreeNow\n\n def checkConditions(self):\n \"\"\"\n This method checks whether the instance's type satisfies the Absolute Impatience Condition (AIC),\n the Return Impatience Condition (RIC), the Growth Impatience Condition (GIC), the Weak Return\n Impatience Condition (WRIC), the Finite Human Wealth Condition (FHWC) and the Finite Value of\n Autarky Condition (FVAC). 
To check which conditions are relevant to the model at hand, a\n reference to the relevant theoretical literature is made.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n raise NotImplementedError()\n\n\ndef applyFlatIncomeTax(\n IncomeDstn, tax_rate, T_retire, unemployed_indices=None, transitory_index=2\n):\n \"\"\"\n Applies a flat income tax rate to all employed income states during the working\n period of life (those before T_retire). Time runs forward in this function.\n\n Parameters\n ----------\n IncomeDstn : [income distributions]\n The discrete approximation to the income distribution in each time period.\n tax_rate : float\n A flat income tax rate to be applied to all employed income.\n T_retire : int\n The time index after which the agent retires.\n unemployed_indices : [int]\n Indices of transitory shocks that represent unemployment states (no tax).\n transitory_index : int\n The index of each element of IncomeDstn representing transitory shocks.\n\n Returns\n -------\n IncomeDstn_new : [income distributions]\n The updated income distributions, after applying the tax.\n \"\"\"\n unemployed_indices = (\n unemployed_indices if unemployed_indices is not None else list()\n )\n IncomeDstn_new = deepcopy(IncomeDstn)\n i = transitory_index\n for t in range(len(IncomeDstn)):\n if t < T_retire:\n for j in range((IncomeDstn[t][i]).size):\n if j not in unemployed_indices:\n IncomeDstn_new[t][i][j] = IncomeDstn[t][i][j] * (1 - tax_rate)\n return IncomeDstn_new\n\n\n# =======================================================\n# ================ Other useful functions ===============\n# =======================================================\n\n\ndef constructAssetsGrid(parameters):\n \"\"\"\n Constructs the base grid of post-decision states, representing end-of-period\n assets above the absolute minimum.\n\n All parameters are passed as attributes of the single input parameters. 
The\n input can be an instance of a ConsumerType, or a custom Parameters class.\n\n Parameters\n ----------\n aXtraMin: float\n Minimum value for the a-grid\n aXtraMax: float\n Maximum value for the a-grid\n aXtraCount: int\n Size of the a-grid\n aXtraExtra: [float]\n Extra values for the a-grid.\n exp_nest: int\n Level of nesting for the exponentially spaced grid\n\n Returns\n -------\n aXtraGrid: np.ndarray\n Base array of values for the post-decision-state grid.\n \"\"\"\n # Unpack the parameters\n aXtraMin = parameters.aXtraMin\n aXtraMax = parameters.aXtraMax\n aXtraCount = parameters.aXtraCount\n aXtraExtra = parameters.aXtraExtra\n grid_type = \"exp_mult\"\n exp_nest = parameters.aXtraNestFac\n\n # Set up post decision state grid:\n aXtraGrid = None\n if grid_type == \"linear\":\n aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)\n elif grid_type == \"exp_mult\":\n aXtraGrid = makeGridExpMult(\n ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest\n )\n else:\n raise Exception(\n \"grid_type not recognized in __init__.\"\n + \"Please ensure grid_type is 'linear' or 'exp_mult'\"\n )\n\n # Add in additional points for the grid:\n for a in aXtraExtra:\n if a is not None:\n if a not in aXtraGrid:\n j = aXtraGrid.searchsorted(a)\n aXtraGrid = np.insert(aXtraGrid, j, a)\n\n return aXtraGrid\n\n\n# Make a dictionary to specify a lifecycle consumer with a finite horizon\ninit_lifecycle = copy(init_idiosyncratic_shocks)\ninit_lifecycle[\"PermGroFac\"] = [\n 1.01,\n 1.01,\n 1.01,\n 1.01,\n 1.01,\n 1.02,\n 1.02,\n 1.02,\n 1.02,\n 1.02,\n]\ninit_lifecycle[\"PermShkStd\"] = [0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0, 0, 0]\ninit_lifecycle[\"TranShkStd\"] = [0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0, 0, 0]\ninit_lifecycle[\"LivPrb\"] = [0.99, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]\ninit_lifecycle[\"T_cycle\"] = 10\ninit_lifecycle[\"T_retire\"] = 7\ninit_lifecycle[\n \"T_age\"\n] = 11 # Make sure that old people die at terminal age and don't turn into newborns!\n\n# Make a dictionary to specify an infinite consumer with a four period cycle\ninit_cyclical = copy(init_idiosyncratic_shocks)\ninit_cyclical['PermGroFac'] = [1.082251, 2.8, 0.3, 1.1]\ninit_cyclical['PermShkStd'] = [0.1,0.1,0.1,0.1]\ninit_cyclical['TranShkStd'] = [0.1,0.1,0.1,0.1]\ninit_cyclical['LivPrb'] = 4*[0.98]\ninit_cyclical['T_cycle'] = 4\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.linspace", "numpy.min", "numpy.asarray", "numpy.tile", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.append", "numpy.logical_or", "numpy.where", "numpy.insert", "numpy.array", "numpy.zeros", "numpy.sum", "scipy.optimize.newton" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
predatorx7/Zero-DCE
[ "b4fa00f315bc650fc697adf84724afd964b6cd5f" ]
[ "ZeroDCE/zero_dce_lite.py" ]
[ "'''\nMIT License\n\nCopyright (c) 2021 Tauhid Khan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import initializers\n\n\nclass ZeroDCE_lite(tf.keras.Model):\n\n def __init__(self, name:str = \"DCE-net_lite\", filters:int = 32, iteration: int = 8, IMG_H:int = 384, IMG_W:int =512, IMG_C:int = 3, **kwargs):\n super(ZeroDCE_lite, self).__init__(name=name, **kwargs)\n self.filters = filters\n self.iteration = iteration\n self.IMG_H = IMG_H\n self.IMG_W = IMG_W\n self.IMG_C = IMG_C\n \n self.depth_conv1 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n self.depth_conv2 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n self.depth_conv3 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n self.depth_conv4 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n self.depth_conv5 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n self.depth_conv6 = layers.SeparableConv2D(\n filters=self.filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='relu',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, 
stddev=0.02)\n )\n\n self.depth_conv_out = layers.SeparableConv2D(\n filters=3,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='same',\n activation='tanh',\n depth_multiplier=1,\n depthwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02),\n pointwise_initializer=initializers.RandomNormal(mean=0.0, stddev=0.02)\n )\n\n \n def call(self, inputs: tf.Tensor):\n x1 = self.depth_conv1(inputs)\n x2 = self.depth_conv2(x1)\n x3 = self.depth_conv3(x2)\n x4 = self.depth_conv4(x3)\n x5 = self.depth_conv5(tf.concat([x3, x4], axis=-1))\n x6 = self.depth_conv6(tf.concat([x2, x5], axis=-1))\n a_map = self.depth_conv_out(tf.concat([x1, x6], axis=-1))\n\n enhanced_image = inputs\n for _ in range(self.iteration):\n enhanced_image = enhanced_image + a_map * (tf.square(enhanced_image) - enhanced_image)\n \n return enhanced_image, a_map\n\n def compile(\n self,\n optimizer: tf.keras.optimizers.Optimizer,\n spatial_consistency_loss: tf.keras.losses.Loss,\n exposure_control_loss: tf.keras.losses.Loss,\n color_constancy_loss: tf.keras.losses.Loss,\n illumination_smoothness_loss: tf.keras.losses.Loss,\n loss_weights: dict = {\n 'spatial_consistency_w': 1.0,\n 'exposure_control_w': 70.0,\n 'color_constancy_w': 200.0,\n 'illumination_smoothness_w': 1200.0\n },\n **kwargs\n ):\n super(ZeroDCE_lite, self).compile(**kwargs)\n self.optimizer = optimizer\n self.spatial_consistency_loss = spatial_consistency_loss\n self.exposure_control_loss = exposure_control_loss\n self.color_constancy_loss = color_constancy_loss\n self.illumination_smoothness_loss = illumination_smoothness_loss\n self.loss_weights = loss_weights\n \n def compute_losses(self, input_img:tf.Tensor, enhanced_img: tf.Tensor, a_maps: tf.Tensor)-> dict:\n '''\n Compute all zero reference DCE losses\n args:\n input_img: tf.Tensor, input image\n enhanced_img: tf.Tensor, enhanced image\n a_maps: tf.Tensor, Alpha maps of enhanced image\n return:\n dict, loss dictionary\n '''\n l_spa = self.loss_weights['spatial_consistency_w'] * self.spatial_consistency_loss(input_img,enhanced_img)\n l_exp = self.loss_weights['exposure_control_w'] * self.exposure_control_loss(enhanced_img)\n l_col = self.loss_weights['color_constancy_w'] * self.color_constancy_loss(enhanced_img)\n l_ill = self.loss_weights['illumination_smoothness_w'] * self.illumination_smoothness_loss(a_maps)\n\n total_loss = l_spa + l_exp + l_col + l_ill\n\n return {\n 'total_loss': total_loss,\n 'spatial_consistency_loss': l_spa,\n 'exposure_control_loss': l_exp,\n 'color_constancy_loss': l_col,\n 'illumination_smoothness_loss': l_ill\n }\n\n @tf.function\n def train_step(self, inputs: tf.Tensor) -> dict:\n '''\n Forward pass, calculate total loss, and calculate gradients with respect to loss.\n args:\n inputs: tf.Tensor, Tensor of shape (batch_size, IMG_H, IMG_W, IMG_C)\n returns:\n loss: tf.Tensor, Tensor of shape (batch_size, 1)\n '''\n with tf.GradientTape() as tape:\n enhanced_img, a_maps = self(inputs)\n losses = self.compute_losses(inputs, enhanced_img, a_maps)\n \n # Calculate gradients\n gradients = tape.gradient(losses['total_loss'], self.trainable_variables)\n # Backpropagate gradients to update weights.\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n return losses\n \n @tf.function\n def test_step(self, inputs: tf.Tensor)-> dict:\n '''\n Forward pass, calculate total loss.\n args:\n inputs: tf.Tensor, Tensor of shape (batch_size, IMG_H, IMG_W, IMG_C)\n returns:\n dict, validation loss dictionary\n '''\n enahncened_img, a_maps = self(inputs)\n 
val_losses = self.compute_losses(inputs, enahncened_img, a_maps)\n return val_losses\n\n def summary(self, plot:bool = False):\n x = tf.keras.Input(shape=(self.IMG_H, self.IMG_W, self.IMG_C))\n model = tf.keras.Model(inputs=[x], outputs=self.call(x), name='DCE-net_lite')\n if plot:\n tf.keras.utils.plot_model(model, to_file='image_assets/DCE-net_lite.png', show_shapes=True, show_layer_names=True, rankdir='TB')\n return model.summary()\n \n def get_config(self):\n return {\n 'filters': self.filters,\n 'iteration': self.iteration\n }\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n \n\nif __name__ == \"__main__\":\n x = tf.random.normal([1, 400, 600, 3])\n model = ZeroDCE_lite(filters=24, iteration=8, IMG_H=400, IMG_W=600, IMG_C=3)\n tf.print(model.summary(plot=True))\n tf.print(model.get_config())\n y,a_map = model(x)\n tf.print(y.shape)\n tf.print(a_map.shape)\n" ]
[ [ "tensorflow.concat", "tensorflow.keras.Input", "tensorflow.keras.utils.plot_model", "tensorflow.square", "tensorflow.print", "tensorflow.keras.initializers.RandomNormal", "tensorflow.random.normal", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
wils0674/dep
[ "7189a26fbcec3d8c3cc7d7015107b8fb6c5f6a45" ]
[ "scripts/util/dump_yearly.py" ]
[ "\"\"\"Dump some monthly data\"\"\"\n\nimport pandas as pd\nfrom pandas.io.sql import read_sql\nfrom pyiem.util import get_dbconn\n\n# East Nish\nDATA = \"\"\"102400030603\n102400030704\n102400030701\n102400030702\n102400030601\n102400030602\n102400030303\n102400030304\n102400030406\n102400030502\n102400030302\n102400030104\n102400030301\n102400030404\n102400030405\n102400030102\n102400030501\n102400030103\n102400030206\n102400030402\n102400030101\n102400030401\n102400030403\n102400030204\n102400030205\n102400030203\n102400030202\n102400030201\n102400030707\n102400030705\n102400030708\n102400030703\n102400030706\"\"\"\nDATA = \"\"\"102400020402\n102400020607\n102400020505\n102400020703\n102400020606\n102400020804\n102400020803\n102400020705\n102400020802\n102400020704\n102400020805\n102400020801\n102400020303\n102400020504\n102400020401\n102400020503\n102400020702\n102400020301\n102400020605\n102400020502\n102400020603\n102400020302\n102400020501\n102400020602\n102400020105\n102400020604\n102400020701\n102400020106\n102400020209\n102400020601\n102400020104\n102400020103\n102400020208\n102400020207\n102400020102\n102400020101\n102400020203\n102400020205\n102400020202\n102400020206\n102400020204\n102400020201\n102400020806\n102400020706\"\"\"\n# Beaver Creek\nDATA = \"\"\"071000040901\n071000040902\n071000040903\n071000040904\n071000040905\n071000040906\n071000040907\n071000040908\n071000040909\n071000040910\n071000040911\"\"\"\n# North Raccoon\nDATA = \"\"\"071000061502\n071000061602\n071000060605\n071000061201\n071000060401\n071000061501\n071000060802\n071000060208\n071000060403\n071000061202\n071000060602\n071000060207\n071000060502\n071000061004\n071000061402\n071000061204\n071000060805\n071000060201\n071000061001\n071000060904\n071000060702\n071000061002\n071000060203\n071000060205\n071000061703\n071000060304\n071000060601\n071000060310\n071000061405\n071000061203\n071000060804\n071000060903\n071000060604\n071000060803\n071000060505\n071000061701\n071000060303\n071000061702\n071000061301\n071000061302\n071000061005\n071000061401\n071000060308\n071000061504\n071000060306\n071000060301\n071000061003\n071000061102\n071000060902\n071000060901\n071000060603\n071000060305\n071000060701\n071000060503\n071000060101\n071000060103\n071000060204\n071000061403\n071000061404\n071000060206\n071000060307\n071000061503\n071000060309\n071000060302\n071000060202\n071000060801\n071000061406\n071000060504\n071000060501\n071000061601\n071000061505\n071000060402\n071000061101\n071000060806\n071000060102\"\"\"\n\nHUCS = [x.strip() for x in DATA.split(\"\\n\")]\n\n\ndef main():\n \"\"\"Go Main Go\"\"\"\n pgconn = get_dbconn(\"idep\")\n df = read_sql(\n \"\"\"\n SELECT huc_12, extract(year from valid) as year,\n sum(avg_loss) * 4.463 as loss_ton_per_acre,\n sum(avg_delivery) * 4.463 as delivery_ton_per_acre,\n sum(qc_precip) / 25.4 as precip_inch,\n sum(avg_runoff) / 25.4 as runoff_inch\n from results_by_huc12 WHERE\n scenario = 0 and huc_12 in %s and valid >= '2007-01-01'\n and valid < '2018-01-01' GROUP by huc_12, year\n \"\"\",\n pgconn,\n params=(tuple(HUCS),),\n )\n writer = pd.ExcelWriter(\n \"dep_yearly.xlsx\", options={\"remove_timezone\": True}\n )\n df.to_excel(writer, \"Yearly Totals\", index=False)\n gdf = df.groupby(\"huc_12\").mean()\n gdf[\n [\n \"loss_ton_per_acre\",\n \"delivery_ton_per_acre\",\n \"precip_inch\",\n \"runoff_inch\",\n ]\n ].to_excel(writer, \"Yearly Averages\")\n format1 = writer.book.add_format({\"num_format\": \"0.00\"})\n worksheet = writer.sheets[\"Yearly Totals\"]\n 
worksheet.set_column(\"A:A\", 18)\n worksheet.set_column(\"C:F\", 20, format1)\n worksheet = writer.sheets[\"Yearly Averages\"]\n worksheet.set_column(\"A:A\", 18)\n worksheet.set_column(\"B:E\", 20, format1)\n writer.save()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.ExcelWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
awais307/pyam
[ "e7828beae490ed10857c9d8cb5a42705a7cf62f0" ]
[ "tests/test_io.py" ]
[ "from pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport pytest\n\nfrom pyam import IamDataFrame, read_datapackage\nfrom pyam.utils import META_IDX\nfrom pyam.testing import assert_iamframe_equal\n\nfrom conftest import TEST_DATA_DIR, META_DF\n\nFILTER_ARGS = dict(scenario=\"scen_a\")\n\n\ndef test_data_none():\n # initializing with 'data=None' raises an error\n match = \"IamDataFrame constructor not properly called!\"\n with pytest.raises(ValueError, match=match):\n IamDataFrame(None)\n\n\ndef test_unknown_type():\n # initializing with unsupported argument type raises an error\n match = \"IamDataFrame constructor not properly called!\"\n with pytest.raises(ValueError, match=match):\n IamDataFrame(True)\n\n\ndef test_not_a_file():\n # initializing with a file-like that's not a file raises an error\n match = \"File foo.csv does not exist\"\n with pytest.raises(FileNotFoundError, match=match):\n IamDataFrame(\"foo.csv\")\n\n\ndef test_io_list():\n # initializing with a list raises an error\n match = r\"Initializing from list is not supported,*.\"\n with pytest.raises(ValueError, match=match):\n IamDataFrame([1, 2])\n\n\ndef test_io_csv(test_df, tmpdir):\n # write to csv\n file = tmpdir / \"testing_io_write_read.csv\"\n test_df.to_csv(file)\n\n # read from csv and assert that `data` tables are equal\n import_df = IamDataFrame(file)\n pd.testing.assert_frame_equal(test_df.data, import_df.data)\n\n\[email protected](\n \"meta_args\", [[{}, {}], [dict(include_meta=\"foo\"), dict(meta_sheet_name=\"foo\")]]\n)\ndef test_io_xlsx(test_df, meta_args, tmpdir):\n # write to xlsx (direct file name and ExcelWriter, see #300)\n file = tmpdir / \"testing_io_write_read.xlsx\"\n for f in [file, pd.ExcelWriter(file)]:\n test_df.to_excel(f, **meta_args[0])\n if isinstance(f, pd.ExcelWriter):\n f.close()\n\n # read from xlsx\n import_df = IamDataFrame(file, **meta_args[1])\n\n # assert that IamDataFrame instances are equal\n assert_iamframe_equal(test_df, import_df)\n\n\[email protected](\n \"sheets, sheetname\",\n [\n [[\"data1\", \"data2\"], dict(sheet_name=\"data*\")],\n [[\"data1\", \"foo\"], dict(sheet_name=[\"data*\", \"foo\"])],\n ],\n)\ndef test_io_xlsx_multiple_data_sheets(test_df, sheets, sheetname, tmpdir):\n # write data to separate sheets in excel file\n file = tmpdir / \"testing_io_write_read.xlsx\"\n xl = pd.ExcelWriter(file)\n for i, (model, scenario) in enumerate(test_df.index):\n test_df.filter(scenario=scenario).to_excel(xl, sheet_name=sheets[i])\n test_df.export_meta(xl)\n xl.close()\n\n # read from xlsx\n import_df = IamDataFrame(file, **sheetname)\n\n # assert that IamDataFrame instances are equal\n assert_iamframe_equal(test_df, import_df)\n\n\ndef test_init_df_with_na_unit(test_pd_df, tmpdir):\n # missing values in the unit column are replaced by an empty string\n test_pd_df.loc[1, \"unit\"] = np.nan\n df = IamDataFrame(test_pd_df)\n assert df.unit == [\"\", \"EJ/yr\"]\n\n # writing to file and importing as pandas returns `nan`, not empty string\n file = tmpdir / \"na_unit.csv\"\n df.to_csv(file)\n df_csv = pd.read_csv(file)\n assert np.isnan(df_csv.loc[1, \"Unit\"])\n IamDataFrame(file) # reading from file as IamDataFrame works\n\n file = tmpdir / \"na_unit.xlsx\"\n df.to_excel(file)\n df_excel = pd.read_excel(file)\n assert np.isnan(df_excel.loc[1, \"Unit\"])\n IamDataFrame(file) # reading from file as IamDataFrame works\n\n\[email protected](\n \"sheet_name, init_args, rename\",\n [\n (\"meta\", {}, False),\n (\"meta\", dict(sheet_name=\"meta\"), False),\n 
(\"foo\", dict(sheet_name=\"foo\"), False),\n (\"foo\", dict(sheet_name=\"foo\"), True),\n ],\n)\ndef test_load_meta_xlsx(test_pd_df, sheet_name, init_args, rename, tmpdir):\n \"\"\"Test loading meta from an Excel file\"\"\"\n # downselect meta\n meta = META_DF.iloc[0:1] if rename else META_DF\n\n # initialize a new IamDataFrame directly from data and meta\n exp = IamDataFrame(test_pd_df, meta=meta)\n\n # write meta to file (without an exclude col)\n file = tmpdir / \"testing_io_meta.xlsx\"\n meta.reset_index().to_excel(file, sheet_name=sheet_name, index=False)\n\n # initialize a new IamDataFrame and load meta from file\n obs = IamDataFrame(test_pd_df)\n obs.load_meta(file)\n\n assert_iamframe_equal(obs, exp)\n\n\[email protected](\"rename\", [True, False])\ndef test_load_meta_csv(test_pd_df, rename, tmpdir):\n \"\"\"Test loading meta from an csv file\"\"\"\n meta = META_DF.iloc[0:1] if rename else META_DF\n\n # initialize a new IamDataFrame directly from data and meta\n exp = IamDataFrame(test_pd_df, meta=meta)\n\n # write meta to file (without an exclude col)\n file = tmpdir / \"testing_io_meta.csv\"\n meta.reset_index().to_csv(file, index=False)\n\n # initialize a new IamDataFrame and load meta from file\n obs = IamDataFrame(test_pd_df)\n obs.load_meta(file)\n\n assert_iamframe_equal(obs, exp)\n\n\ndef test_load_meta_wrong_index(test_df_year, tmpdir):\n \"\"\"Loading meta without (at least) index cols as headers raises an error\"\"\"\n\n # write meta frame with wrong index to file, then load to the IamDataFrame\n file = tmpdir / \"testing_meta_empty.xlsx\"\n pd.DataFrame(columns=[\"model\", \"foo\"]).to_excel(file, index=False)\n\n match = \".* \\(sheet meta\\) missing required index columns \\['scenario'\\]\\!\"\n with pytest.raises(ValueError, match=match):\n test_df_year.load_meta(file)\n\n\ndef test_load_meta_empty_rows(test_df_year, tmpdir):\n \"\"\"Loading empty meta table (columns but no rows) from xlsx file\"\"\"\n exp = test_df_year.copy() # loading empty file has no effect\n\n # write empty meta frame to file, then load to the IamDataFrame\n file = tmpdir / \"testing_meta_empty.xlsx\"\n pd.DataFrame(columns=META_IDX).to_excel(file, index=False)\n test_df_year.load_meta(file)\n\n assert_iamframe_equal(test_df_year, exp)\n\n\ndef test_load_meta_empty(test_pd_df):\n \"\"\"Initializing from xlsx where 'meta' has no rows and non-empty invisible header\"\"\"\n obs = IamDataFrame(TEST_DATA_DIR / \"empty_meta_sheet.xlsx\")\n exp = IamDataFrame(test_pd_df)\n assert_iamframe_equal(obs, exp)\n\n\ndef test_load_ssp_database_downloaded_file(test_pd_df):\n exp = IamDataFrame(test_pd_df).filter(**FILTER_ARGS).as_pandas()\n file = TEST_DATA_DIR / \"test_SSP_database_raw_download.xlsx\"\n obs_df = IamDataFrame(file)\n pd.testing.assert_frame_equal(obs_df.as_pandas(), exp)\n\n\ndef test_load_rcp_database_downloaded_file(test_pd_df):\n exp = IamDataFrame(test_pd_df).filter(**FILTER_ARGS).as_pandas()\n file = TEST_DATA_DIR / \"test_RCP_database_raw_download.xlsx\"\n obs_df = IamDataFrame(file)\n pd.testing.assert_frame_equal(obs_df.as_pandas(), exp)\n\n\ndef test_io_datapackage(test_df, tmpdir):\n # add column to `meta` and write to datapackage\n file = Path(tmpdir) / \"foo.zip\"\n test_df.set_meta([\"a\", \"b\"], \"string\")\n test_df.to_datapackage(file)\n\n # read from csv assert that IamDataFrame instances are equal\n import_df = read_datapackage(file)\n assert_iamframe_equal(test_df, import_df)\n" ]
[ [ "pandas.read_excel", "pandas.read_csv", "numpy.isnan", "pandas.DataFrame", "pandas.testing.assert_frame_equal", "pandas.ExcelWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
IsaacGuan/SGSG
[ "7476f1b8ec0ed90cd9896ee2d23241c4310cb78c" ]
[ "test.py" ]
[ "import os\nimport argparse\nimport heapq\nimport pandas as pd\n\nfrom MDN import *\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\nRESULTS_DIR = os.path.join(BASE_DIR, 'results')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_name', type=str, default='chairs', help='name of the dataset (chairs/lamps/tables)')\n parser.add_argument('--labels_file', type=str, default='chairs_artificial_labels', help='file name the spreadsheet of user-provided labels saved in the data folder')\n parser.add_argument('--gaussian_idx', type=int, default=0, help='the index of Gaussian (sorted in the descending order of alpha) for generating the mu and sigma')\n args = parser.parse_args()\n dataset_name = args.dataset_name\n labels_file = args.labels_file\n gaussian_idx = args.gaussian_idx\n\n if not os.path.exists(os.path.join(RESULTS_DIR, dataset_name + '_mdn.h5')):\n raise('Network has not been trained!')\n\n if dataset_name == 'chairs':\n mdn = MDN(labels_dim=25)\n elif dataset_name == 'lamps':\n mdn = MDN(labels_dim=24)\n elif dataset_name == 'tables':\n mdn = MDN(labels_dim=20)\n else:\n raise('Dataset is not supported!')\n\n labels = pd.read_excel(os.path.join(DATA_DIR, labels_file + '.xlsx'))\n labels = pd.DataFrame(labels)\n labels = labels.iloc[:, 3:]\n labels = labels.to_numpy()\n labels = np.transpose(labels)\n labels = np.expand_dims(labels, axis=1)\n\n alpha, mu, sigma = mdn.test(\n labels_test = labels,\n weights_dir = os.path.join(RESULTS_DIR, dataset_name + '_mdn.h5'))\n\n final_mu = []\n final_sigma = []\n for i in range(alpha.shape[0]):\n idx_sorted = heapq.nlargest(5, range(len(alpha[i, 0])), key=alpha[i, 0].__getitem__)\n alpha_sorted = heapq.nlargest(5, alpha[i, 0])\n idx_selected = idx_sorted[gaussian_idx]\n alpha_selected = alpha_sorted[gaussian_idx]\n final_mu.append(mu[i, :, idx_selected])\n final_sigma.append(mu[i, :, idx_selected])\n final_mu = np.array(final_mu)\n final_sigma = np.array(final_sigma)\n\n np.savetxt(os.path.join(RESULTS_DIR, dataset_name + '_mu.csv'), final_mu, delimiter=',')\n np.savetxt(os.path.join(RESULTS_DIR, dataset_name + '_sigma.csv'), final_sigma, delimiter=',')\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mayanksuman/xgboost
[ "2e052e74b627659b97ce741be5b64581479303c3" ]
[ "python-package/xgboost/core.py" ]
[ "# coding: utf-8\n# pylint: disable=too-many-arguments, too-many-branches, invalid-name\n# pylint: disable=too-many-branches, too-many-lines, W0141\n\"\"\"Core XGBoost Library.\"\"\"\nfrom __future__ import absolute_import\nimport collections\n# pylint: disable=no-name-in-module,import-error\ntry:\n from collections.abc import Mapping # Python 3\nexcept ImportError:\n from collections import Mapping # Python 2\n# pylint: enable=no-name-in-module,import-error\nimport ctypes\nimport os\nimport re\nimport sys\nimport warnings\n\nimport numpy as np\nimport scipy.sparse\n\nfrom .compat import (STRING_TYPES, PY3, DataFrame, MultiIndex, py_str,\n PANDAS_INSTALLED, DataTable)\nfrom .libpath import find_lib_path\n\n\n# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h\nc_bst_ulong = ctypes.c_uint64\n\n\nclass XGBoostError(Exception):\n \"\"\"Error thrown by xgboost trainer.\"\"\"\n pass\n\n\nclass EarlyStopException(Exception):\n \"\"\"Exception to signal early stopping.\n\n Parameters\n ----------\n best_iteration : int\n The best iteration stopped.\n \"\"\"\n def __init__(self, best_iteration):\n super(EarlyStopException, self).__init__()\n self.best_iteration = best_iteration\n\n\n# Callback environment used by callbacks\nCallbackEnv = collections.namedtuple(\n \"XGBoostCallbackEnv\",\n [\"model\",\n \"cvfolds\",\n \"iteration\",\n \"begin_iteration\",\n \"end_iteration\",\n \"rank\",\n \"evaluation_result_list\"])\n\n\ndef from_pystr_to_cstr(data):\n \"\"\"Convert a list of Python str to C pointer\n\n Parameters\n ----------\n data : list\n list of str\n \"\"\"\n\n if isinstance(data, list):\n pointers = (ctypes.c_char_p * len(data))()\n if PY3:\n data = [bytes(d, 'utf-8') for d in data]\n else:\n data = [d.encode('utf-8') if isinstance(d, unicode) else d\n for d in data]\n pointers[:] = data\n return pointers\n else:\n # copy from above when we actually use it\n raise NotImplementedError\n\n\ndef from_cstr_to_pystr(data, length):\n \"\"\"Revert C pointer to Python str\n\n Parameters\n ----------\n data : ctypes pointer\n pointer to data\n length : ctypes pointer\n pointer to length of data\n \"\"\"\n if PY3:\n res = []\n for i in range(length.value):\n try:\n res.append(str(data[i].decode('ascii')))\n except UnicodeDecodeError:\n res.append(str(data[i].decode('utf-8')))\n else:\n res = []\n for i in range(length.value):\n try:\n res.append(str(data[i].decode('ascii')))\n except UnicodeDecodeError:\n res.append(unicode(data[i].decode('utf-8')))\n return res\n\n\ndef _log_callback(msg):\n \"\"\"Redirect logs from native library into Python console\"\"\"\n print(\"{0:s}\".format(py_str(msg)))\n\n\ndef _get_log_callback_func():\n \"\"\"Wrap log_callback() method in ctypes callback type\"\"\"\n # pylint: disable=invalid-name\n CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_char_p)\n return CALLBACK(_log_callback)\n\n\ndef _load_lib():\n \"\"\"Load xgboost Library.\"\"\"\n lib_paths = find_lib_path()\n if len(lib_paths) == 0:\n return None\n try:\n pathBackup = os.environ['PATH'].split(os.pathsep)\n except KeyError:\n pathBackup = []\n lib_success = False\n os_error_list = []\n for lib_path in lib_paths:\n try:\n # needed when the lib is linked with non-system-available dependencies\n os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])\n lib = ctypes.cdll.LoadLibrary(lib_path)\n lib_success = True\n except OSError as e:\n os_error_list.append(str(e))\n continue\n finally:\n os.environ['PATH'] = os.pathsep.join(pathBackup)\n if not lib_success:\n libname = 
os.path.basename(lib_paths[0])\n raise XGBoostError(\n 'XGBoost Library ({}) could not be loaded.\\n'.format(libname) +\n 'Likely causes:\\n' +\n ' * OpenMP runtime is not installed ' +\n '(vcomp140.dll or libgomp-1.dll for Windows, ' +\n 'libgomp.so for UNIX-like OSes)\\n' +\n ' * You are running 32-bit Python on a 64-bit OS\\n' +\n 'Error message(s): {}\\n'.format(os_error_list))\n lib.XGBGetLastError.restype = ctypes.c_char_p\n lib.callback = _get_log_callback_func()\n if lib.XGBRegisterLogCallback(lib.callback) != 0:\n raise XGBoostError(lib.XGBGetLastError())\n return lib\n\n\n# load the XGBoost library globally\n_LIB = _load_lib()\n\n\ndef _check_call(ret):\n \"\"\"Check the return value of C API call\n\n This function will raise exception when error occurs.\n Wrap every API call with this function\n\n Parameters\n ----------\n ret : int\n return value from API calls\n \"\"\"\n if ret != 0:\n raise XGBoostError(py_str(_LIB.XGBGetLastError()))\n\n\ndef ctypes2numpy(cptr, length, dtype):\n \"\"\"Convert a ctypes pointer array to a numpy array.\n \"\"\"\n NUMPY_TO_CTYPES_MAPPING = {\n np.float32: ctypes.c_float,\n np.uint32: ctypes.c_uint,\n }\n if dtype not in NUMPY_TO_CTYPES_MAPPING:\n raise RuntimeError('Supported types: {}'.format(NUMPY_TO_CTYPES_MAPPING.keys()))\n ctype = NUMPY_TO_CTYPES_MAPPING[dtype]\n if not isinstance(cptr, ctypes.POINTER(ctype)):\n raise RuntimeError('expected {} pointer'.format(ctype))\n res = np.zeros(length, dtype=dtype)\n if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):\n raise RuntimeError('memmove failed')\n return res\n\n\ndef ctypes2buffer(cptr, length):\n \"\"\"Convert ctypes pointer to buffer type.\"\"\"\n if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):\n raise RuntimeError('expected char pointer')\n res = bytearray(length)\n rptr = (ctypes.c_char * length).from_buffer(res)\n if not ctypes.memmove(rptr, cptr, length):\n raise RuntimeError('memmove failed')\n return res\n\n\ndef c_str(string):\n \"\"\"Convert a python string to cstring.\"\"\"\n return ctypes.c_char_p(string.encode('utf-8'))\n\n\ndef c_array(ctype, values):\n \"\"\"Convert a python string to c array.\"\"\"\n if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):\n return (ctype * len(values)).from_buffer_copy(values)\n return (ctype * len(values))(*values)\n\n\nPANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int',\n 'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int',\n 'float16': 'float', 'float32': 'float', 'float64': 'float',\n 'bool': 'i'}\n\n\ndef _maybe_pandas_data(data, feature_names, feature_types):\n \"\"\" Extract internal data from pd.DataFrame for DMatrix data \"\"\"\n\n if not isinstance(data, DataFrame):\n return data, feature_names, feature_types\n\n data_dtypes = data.dtypes\n if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):\n bad_fields = [data.columns[i] for i, dtype in\n enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]\n\n msg = \"\"\"DataFrame.dtypes for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None:\n if isinstance(data.columns, MultiIndex):\n feature_names = [\n ' '.join(map(str, i))\n for i in data.columns\n ]\n else:\n feature_names = data.columns.format()\n\n if feature_types is None:\n feature_types = [PANDAS_DTYPE_MAPPER[dtype.name] for dtype in data_dtypes]\n\n data = data.values.astype('float')\n\n 
return data, feature_names, feature_types\n\n\ndef _maybe_pandas_label(label):\n \"\"\" Extract internal data from pd.DataFrame for DMatrix label \"\"\"\n\n if isinstance(label, DataFrame):\n if len(label.columns) > 1:\n raise ValueError('DataFrame for label cannot have multiple columns')\n\n label_dtypes = label.dtypes\n if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes):\n raise ValueError('DataFrame.dtypes for label must be int, float or bool')\n else:\n label = label.values.astype('float')\n # pd.Series can be passed to xgb as it is\n\n return label\n\n\nDT_TYPE_MAPPER = {'bool': 'bool', 'int': 'int', 'real': 'float'}\n\nDT_TYPE_MAPPER2 = {'bool': 'i', 'int': 'int', 'real': 'float'}\n\n\ndef _maybe_dt_data(data, feature_names, feature_types):\n \"\"\"\n Validate feature names and types if data table\n \"\"\"\n if not isinstance(data, DataTable):\n return data, feature_names, feature_types\n\n data_types_names = tuple(lt.name for lt in data.ltypes)\n bad_fields = [data.names[i]\n for i, type_name in enumerate(data_types_names)\n if type_name not in DT_TYPE_MAPPER]\n if bad_fields:\n msg = \"\"\"DataFrame.types for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None:\n feature_names = data.names\n\n # always return stypes for dt ingestion\n if feature_types is not None:\n raise ValueError('DataTable has own feature types, cannot pass them in')\n else:\n feature_types = np.vectorize(DT_TYPE_MAPPER2.get)(data_types_names)\n\n return data, feature_names, feature_types\n\n\ndef _maybe_dt_array(array):\n \"\"\" Extract numpy array from single column data table \"\"\"\n if not isinstance(array, DataTable) or array is None:\n return array\n\n if array.shape[1] > 1:\n raise ValueError('DataTable for label or weight cannot have multiple columns')\n\n # below requires new dt version\n # extract first column\n array = array.to_numpy()[:, 0].astype('float')\n\n return array\n\n\nclass DMatrix(object):\n \"\"\"Data Matrix used in XGBoost.\n\n DMatrix is a internal data structure that used by XGBoost\n which is optimized for both memory efficiency and training speed.\n You can construct DMatrix from numpy.arrays\n \"\"\"\n\n _feature_names = None # for previous version's pickle\n _feature_types = None\n\n def __init__(self, data, label=None, missing=None,\n weight=None, silent=False,\n feature_names=None, feature_types=None,\n nthread=None):\n \"\"\"\n Parameters\n ----------\n data : string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame\n Data source of DMatrix.\n When data is string type, it represents the path libsvm format txt file,\n or binary file that xgboost can read from.\n label : list or numpy 1-D array, optional\n Label of the training data.\n missing : float, optional\n Value in the data which needs to be present as a missing value. If\n None, defaults to np.nan.\n weight : list or numpy 1-D array , optional\n Weight for each instance.\n\n .. note:: For ranking task, weights are per-group.\n\n In ranking task, one weight is assigned to each group (not each data\n point). 
This is because we only care about the relative ordering of\n data points within each group, so it doesn't make sense to assign\n weights to individual data points.\n\n silent : boolean, optional\n Whether print messages during construction\n feature_names : list, optional\n Set names for features.\n feature_types : list, optional\n Set types for features.\n nthread : integer, optional\n Number of threads to use for loading data from numpy array. If -1,\n uses maximum threads available on the system.\n \"\"\"\n # force into void_p, mac need to pass things in as void_p\n if data is None:\n self.handle = None\n\n if feature_names is not None:\n self._feature_names = feature_names\n if feature_types is not None:\n self._feature_types = feature_types\n return\n\n data, feature_names, feature_types = _maybe_pandas_data(data,\n feature_names,\n feature_types)\n\n data, feature_names, feature_types = _maybe_dt_data(data,\n feature_names,\n feature_types)\n label = _maybe_pandas_label(label)\n label = _maybe_dt_array(label)\n weight = _maybe_dt_array(weight)\n\n if isinstance(data, list):\n warnings.warn('Initializing DMatrix from List is deprecated.',\n DeprecationWarning)\n\n if isinstance(data, STRING_TYPES):\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromFile(c_str(data),\n ctypes.c_int(silent),\n ctypes.byref(handle)))\n self.handle = handle\n elif isinstance(data, scipy.sparse.csr_matrix):\n self._init_from_csr(data)\n elif isinstance(data, scipy.sparse.csc_matrix):\n self._init_from_csc(data)\n elif isinstance(data, np.ndarray):\n self._init_from_npy2d(data, missing, nthread)\n elif isinstance(data, DataTable):\n self._init_from_dt(data, nthread)\n else:\n try:\n csr = scipy.sparse.csr_matrix(data)\n self._init_from_csr(csr)\n except:\n raise TypeError('can not initialize DMatrix from'\n ' {}'.format(type(data).__name__))\n\n if label is not None:\n if isinstance(label, np.ndarray):\n self.set_label_npy2d(label)\n else:\n self.set_label(label)\n if weight is not None:\n if isinstance(weight, np.ndarray):\n self.set_weight_npy2d(weight)\n else:\n self.set_weight(weight)\n\n self.feature_names = feature_names\n self.feature_types = feature_types\n\n def _init_from_csr(self, csr):\n \"\"\"\n Initialize data from a CSR matrix.\n \"\"\"\n if len(csr.indices) != len(csr.data):\n raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromCSREx(c_array(ctypes.c_size_t, csr.indptr),\n c_array(ctypes.c_uint, csr.indices),\n c_array(ctypes.c_float, csr.data),\n ctypes.c_size_t(len(csr.indptr)),\n ctypes.c_size_t(len(csr.data)),\n ctypes.c_size_t(csr.shape[1]),\n ctypes.byref(handle)))\n self.handle = handle\n\n def _init_from_csc(self, csc):\n \"\"\"\n Initialize data from a CSC matrix.\n \"\"\"\n if len(csc.indices) != len(csc.data):\n raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr),\n c_array(ctypes.c_uint, csc.indices),\n c_array(ctypes.c_float, csc.data),\n ctypes.c_size_t(len(csc.indptr)),\n ctypes.c_size_t(len(csc.data)),\n ctypes.c_size_t(csc.shape[0]),\n ctypes.byref(handle)))\n self.handle = handle\n\n def _init_from_npy2d(self, mat, missing, nthread):\n \"\"\"\n Initialize data from a 2-D numpy matrix.\n\n If ``mat`` does not have ``order='C'`` (aka row-major) or is not contiguous,\n a temporary copy will be made.\n\n If ``mat`` does 
not have ``dtype=numpy.float32``, a temporary copy will be made.\n\n So there could be as many as two temporary data copies; be mindful of input layout\n and type if memory use is a concern.\n \"\"\"\n if len(mat.shape) != 2:\n raise ValueError('Input numpy.ndarray must be 2 dimensional')\n # flatten the array by rows and ensure it is float32.\n # we try to avoid data copies if possible (reshape returns a view when possible\n # and we explicitly tell np.array to try and avoid copying)\n data = np.array(mat.reshape(mat.size), copy=False, dtype=np.float32)\n handle = ctypes.c_void_p()\n missing = missing if missing is not None else np.nan\n if nthread is None:\n _check_call(_LIB.XGDMatrixCreateFromMat(\n data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n c_bst_ulong(mat.shape[0]),\n c_bst_ulong(mat.shape[1]),\n ctypes.c_float(missing),\n ctypes.byref(handle)))\n else:\n _check_call(_LIB.XGDMatrixCreateFromMat_omp(\n data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n c_bst_ulong(mat.shape[0]),\n c_bst_ulong(mat.shape[1]),\n ctypes.c_float(missing),\n ctypes.byref(handle),\n nthread))\n self.handle = handle\n\n def _init_from_dt(self, data, nthread):\n \"\"\"\n Initialize data from a datatable Frame.\n \"\"\"\n ptrs = (ctypes.c_void_p * data.ncols)()\n if hasattr(data, \"internal\") and hasattr(data.internal, \"column\"):\n # datatable>0.8.0\n for icol in range(data.ncols):\n col = data.internal.column(icol)\n ptr = col.data_pointer\n ptrs[icol] = ctypes.c_void_p(ptr)\n else:\n # datatable<=0.8.0\n from datatable.internal import frame_column_data_r\n for icol in range(data.ncols):\n ptrs[icol] = frame_column_data_r(data, icol)\n\n # always return stypes for dt ingestion\n feature_type_strings = (ctypes.c_char_p * data.ncols)()\n for icol in range(data.ncols):\n feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))\n\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixCreateFromDT(\n ptrs, feature_type_strings,\n c_bst_ulong(data.shape[0]),\n c_bst_ulong(data.shape[1]),\n ctypes.byref(handle),\n nthread))\n self.handle = handle\n\n def __del__(self):\n if hasattr(self, \"handle\") and self.handle is not None:\n _check_call(_LIB.XGDMatrixFree(self.handle))\n self.handle = None\n\n def get_float_info(self, field):\n \"\"\"Get float property from the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n Returns\n -------\n info : array\n a numpy array of float information of the data\n \"\"\"\n length = c_bst_ulong()\n ret = ctypes.POINTER(ctypes.c_float)()\n _check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,\n c_str(field),\n ctypes.byref(length),\n ctypes.byref(ret)))\n return ctypes2numpy(ret, length.value, np.float32)\n\n def get_uint_info(self, field):\n \"\"\"Get unsigned integer property from the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n Returns\n -------\n info : array\n a numpy array of unsigned integer information of the data\n \"\"\"\n length = c_bst_ulong()\n ret = ctypes.POINTER(ctypes.c_uint)()\n _check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,\n c_str(field),\n ctypes.byref(length),\n ctypes.byref(ret)))\n return ctypes2numpy(ret, length.value, np.uint32)\n\n def set_float_info(self, field, data):\n \"\"\"Set float type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n \"\"\"\n if getattr(data, 'base', None) is not None and \\\n data.base is not 
None and isinstance(data, np.ndarray) \\\n and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):\n self.set_float_info_npy2d(field, data)\n return\n c_data = c_array(ctypes.c_float, data)\n _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,\n c_str(field),\n c_data,\n c_bst_ulong(len(data))))\n\n def set_float_info_npy2d(self, field, data):\n \"\"\"Set float type property into the DMatrix\n for numpy 2d array input\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n \"\"\"\n if getattr(data, 'base', None) is not None and \\\n data.base is not None and isinstance(data, np.ndarray) \\\n and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):\n warnings.warn(\"Use subset (sliced data) of np.ndarray is not recommended \" +\n \"because it will generate extra copies and increase memory consumption\")\n data = np.array(data, copy=True, dtype=np.float32)\n else:\n data = np.array(data, copy=False, dtype=np.float32)\n c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))\n _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,\n c_str(field),\n c_data,\n c_bst_ulong(len(data))))\n\n def set_uint_info(self, field, data):\n \"\"\"Set uint type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n \"\"\"\n if getattr(data, 'base', None) is not None and \\\n data.base is not None and isinstance(data, np.ndarray) \\\n and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):\n warnings.warn(\"Use subset (sliced data) of np.ndarray is not recommended \" +\n \"because it will generate extra copies and increase memory consumption\")\n data = np.array(data, copy=True, dtype=ctypes.c_uint)\n else:\n data = np.array(data, copy=False, dtype=ctypes.c_uint)\n _check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,\n c_str(field),\n c_array(ctypes.c_uint, data),\n c_bst_ulong(len(data))))\n\n def save_binary(self, fname, silent=True):\n \"\"\"Save DMatrix to an XGBoost buffer. Saved binary can be later loaded\n by providing the path to :py:func:`xgboost.DMatrix` as input.\n\n Parameters\n ----------\n fname : string\n Name of the output buffer file.\n silent : bool (optional; default: True)\n If set, the output is suppressed.\n \"\"\"\n _check_call(_LIB.XGDMatrixSaveBinary(self.handle,\n c_str(fname),\n ctypes.c_int(silent)))\n\n def set_label(self, label):\n \"\"\"Set label of dmatrix\n\n Parameters\n ----------\n label: array like\n The label information to be set into DMatrix\n \"\"\"\n self.set_float_info('label', label)\n\n def set_label_npy2d(self, label):\n \"\"\"Set label of dmatrix\n\n Parameters\n ----------\n label: array like\n The label information to be set into DMatrix\n from numpy 2D array\n \"\"\"\n self.set_float_info_npy2d('label', label)\n\n def set_weight(self, weight):\n \"\"\" Set weight of each instance.\n\n Parameters\n ----------\n weight : array like\n Weight for each data point\n\n .. note:: For ranking task, weights are per-group.\n\n In ranking task, one weight is assigned to each group (not each data\n point). 
This is because we only care about the relative ordering of\n data points within each group, so it doesn't make sense to assign\n weights to individual data points.\n \"\"\"\n self.set_float_info('weight', weight)\n\n def set_weight_npy2d(self, weight):\n \"\"\" Set weight of each instance\n for numpy 2D array\n\n Parameters\n ----------\n weight : array like\n Weight for each data point in numpy 2D array\n\n .. note:: For ranking task, weights are per-group.\n\n In ranking task, one weight is assigned to each group (not each data\n point). This is because we only care about the relative ordering of\n data points within each group, so it doesn't make sense to assign\n weights to individual data points.\n \"\"\"\n self.set_float_info_npy2d('weight', weight)\n\n def set_base_margin(self, margin):\n \"\"\" Set base margin of booster to start from.\n\n This can be used to specify a prediction value of\n existing model to be base_margin\n However, remember margin is needed, instead of transformed prediction\n e.g. for logistic regression: need to put in value before logistic transformation\n see also example/demo.py\n\n Parameters\n ----------\n margin: array like\n Prediction margin of each datapoint\n \"\"\"\n self.set_float_info('base_margin', margin)\n\n def set_group(self, group):\n \"\"\"Set group size of DMatrix (used for ranking).\n\n Parameters\n ----------\n group : array like\n Group size of each group\n \"\"\"\n _check_call(_LIB.XGDMatrixSetGroup(self.handle,\n c_array(ctypes.c_uint, group),\n c_bst_ulong(len(group))))\n\n def get_label(self):\n \"\"\"Get the label of the DMatrix.\n\n Returns\n -------\n label : array\n \"\"\"\n return self.get_float_info('label')\n\n def get_weight(self):\n \"\"\"Get the weight of the DMatrix.\n\n Returns\n -------\n weight : array\n \"\"\"\n return self.get_float_info('weight')\n\n def get_base_margin(self):\n \"\"\"Get the base margin of the DMatrix.\n\n Returns\n -------\n base_margin : float\n \"\"\"\n return self.get_float_info('base_margin')\n\n def num_row(self):\n \"\"\"Get the number of rows in the DMatrix.\n\n Returns\n -------\n number of rows : int\n \"\"\"\n ret = c_bst_ulong()\n _check_call(_LIB.XGDMatrixNumRow(self.handle,\n ctypes.byref(ret)))\n return ret.value\n\n def num_col(self):\n \"\"\"Get the number of columns (features) in the DMatrix.\n\n Returns\n -------\n number of columns : int\n \"\"\"\n ret = c_bst_ulong()\n _check_call(_LIB.XGDMatrixNumCol(self.handle,\n ctypes.byref(ret)))\n return ret.value\n\n def slice(self, rindex):\n \"\"\"Slice the DMatrix and return a new DMatrix that only contains `rindex`.\n\n Parameters\n ----------\n rindex : list\n List of indices to be selected.\n\n Returns\n -------\n res : DMatrix\n A new DMatrix containing only selected indices.\n \"\"\"\n res = DMatrix(None, feature_names=self.feature_names,\n feature_types=self.feature_types)\n res.handle = ctypes.c_void_p()\n _check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,\n c_array(ctypes.c_int, rindex),\n c_bst_ulong(len(rindex)),\n ctypes.byref(res.handle)))\n return res\n\n @property\n def feature_names(self):\n \"\"\"Get feature names (column labels).\n\n Returns\n -------\n feature_names : list or None\n \"\"\"\n if self._feature_names is None:\n self._feature_names = ['f{0}'.format(i) for i in range(self.num_col())]\n return self._feature_names\n\n @property\n def feature_types(self):\n \"\"\"Get feature types (column types).\n\n Returns\n -------\n feature_types : list or None\n \"\"\"\n return self._feature_types\n\n 
@feature_names.setter\n def feature_names(self, feature_names):\n \"\"\"Set feature names (column labels).\n\n Parameters\n ----------\n feature_names : list or None\n Labels for features. None will reset existing feature names\n \"\"\"\n if feature_names is not None:\n # validate feature name\n try:\n if not isinstance(feature_names, str):\n feature_names = [n for n in iter(feature_names)]\n else:\n feature_names = [feature_names]\n except TypeError:\n feature_names = [feature_names]\n\n if len(feature_names) != len(set(feature_names)):\n raise ValueError('feature_names must be unique')\n if len(feature_names) != self.num_col():\n msg = 'feature_names must have the same length as data'\n raise ValueError(msg)\n # prohibit to use symbols may affect to parse. e.g. []<\n if not all(isinstance(f, STRING_TYPES) and\n not any(x in f for x in set(('[', ']', '<')))\n for f in feature_names):\n raise ValueError('feature_names may not contain [, ] or <')\n else:\n # reset feature_types also\n self.feature_types = None\n self._feature_names = feature_names\n\n @feature_types.setter\n def feature_types(self, feature_types):\n \"\"\"Set feature types (column types).\n\n This is for displaying the results and unrelated\n to the learning process.\n\n Parameters\n ----------\n feature_types : list or None\n Labels for features. None will reset existing feature names\n \"\"\"\n if feature_types is not None:\n if self._feature_names is None:\n msg = 'Unable to set feature types before setting names'\n raise ValueError(msg)\n\n if isinstance(feature_types, STRING_TYPES):\n # single string will be applied to all columns\n feature_types = [feature_types] * self.num_col()\n\n try:\n if not isinstance(feature_types, str):\n feature_types = [n for n in iter(feature_types)]\n else:\n feature_types = [feature_types]\n except TypeError:\n feature_types = [feature_types]\n\n if len(feature_types) != self.num_col():\n msg = 'feature_types must have the same length as data'\n raise ValueError(msg)\n\n valid = ('int', 'float', 'i', 'q')\n if not all(isinstance(f, STRING_TYPES) and f in valid\n for f in feature_types):\n raise ValueError('All feature_names must be {int, float, i, q}')\n self._feature_types = feature_types\n\n\nclass Booster(object):\n # pylint: disable=too-many-public-methods\n \"\"\"A Booster of XGBoost.\n\n Booster is the model of xgboost, that contains low level routines for\n training, prediction and evaluation.\n \"\"\"\n\n feature_names = None\n\n def __init__(self, params=None, cache=(), model_file=None):\n # pylint: disable=invalid-name\n \"\"\"\n Parameters\n ----------\n params : dict\n Parameters for boosters.\n cache : list\n List of cache items.\n model_file : string\n Path to the model file.\n \"\"\"\n for d in cache:\n if not isinstance(d, DMatrix):\n raise TypeError('invalid cache item: {}'.format(type(d).__name__))\n self._validate_features(d)\n\n dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])\n self.handle = ctypes.c_void_p()\n _check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(len(cache)),\n ctypes.byref(self.handle)))\n self.set_param({'seed': 0})\n self.set_param(params or {})\n if (params is not None) and ('booster' in params):\n self.booster = params['booster']\n else:\n self.booster = 'gbtree'\n if model_file is not None:\n self.load_model(model_file)\n\n def __del__(self):\n if self.handle is not None:\n _check_call(_LIB.XGBoosterFree(self.handle))\n self.handle = None\n\n def __getstate__(self):\n # can't pickle ctypes pointers\n # put model content in bytearray\n 
this = self.__dict__.copy()\n handle = this['handle']\n if handle is not None:\n raw = self.save_raw()\n this[\"handle\"] = raw\n return this\n\n def __setstate__(self, state):\n # reconstruct handle from raw data\n handle = state['handle']\n if handle is not None:\n buf = handle\n dmats = c_array(ctypes.c_void_p, [])\n handle = ctypes.c_void_p()\n _check_call(_LIB.XGBoosterCreate(dmats, c_bst_ulong(0), ctypes.byref(handle)))\n length = c_bst_ulong(len(buf))\n ptr = (ctypes.c_char * len(buf)).from_buffer(buf)\n _check_call(_LIB.XGBoosterLoadModelFromBuffer(handle, ptr, length))\n state['handle'] = handle\n self.__dict__.update(state)\n self.set_param({'seed': 0})\n\n def __copy__(self):\n return self.__deepcopy__(None)\n\n def __deepcopy__(self, _):\n return Booster(model_file=self.save_raw())\n\n def copy(self):\n \"\"\"Copy the booster object.\n\n Returns\n -------\n booster: `Booster`\n a copied booster model\n \"\"\"\n return self.__copy__()\n\n def load_rabit_checkpoint(self):\n \"\"\"Initialize the model by load from rabit checkpoint.\n\n Returns\n -------\n version: integer\n The version number of the model.\n \"\"\"\n version = ctypes.c_int()\n _check_call(_LIB.XGBoosterLoadRabitCheckpoint(\n self.handle, ctypes.byref(version)))\n return version.value\n\n def save_rabit_checkpoint(self):\n \"\"\"Save the current booster to rabit checkpoint.\"\"\"\n _check_call(_LIB.XGBoosterSaveRabitCheckpoint(self.handle))\n\n def attr(self, key):\n \"\"\"Get attribute string from the Booster.\n\n Parameters\n ----------\n key : str\n The key to get attribute from.\n\n Returns\n -------\n value : str\n The attribute value of the key, returns None if attribute do not exist.\n \"\"\"\n ret = ctypes.c_char_p()\n success = ctypes.c_int()\n _check_call(_LIB.XGBoosterGetAttr(\n self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))\n if success.value != 0:\n return py_str(ret.value)\n else:\n return None\n\n def attributes(self):\n \"\"\"Get attributes stored in the Booster as a dictionary.\n\n Returns\n -------\n result : dictionary of attribute_name: attribute_value pairs of strings.\n Returns an empty dict if there's no attributes.\n \"\"\"\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n _check_call(_LIB.XGBoosterGetAttrNames(self.handle,\n ctypes.byref(length),\n ctypes.byref(sarr)))\n attr_names = from_cstr_to_pystr(sarr, length)\n res = dict([(n, self.attr(n)) for n in attr_names])\n return res\n\n def set_attr(self, **kwargs):\n \"\"\"Set the attribute of the Booster.\n\n Parameters\n ----------\n **kwargs\n The attributes to set. 
Setting a value to None deletes an attribute.\n \"\"\"\n for key, value in kwargs.items():\n if value is not None:\n if not isinstance(value, STRING_TYPES):\n raise ValueError(\"Set Attr only accepts string values\")\n value = c_str(str(value))\n _check_call(_LIB.XGBoosterSetAttr(\n self.handle, c_str(key), value))\n\n def set_param(self, params, value=None):\n \"\"\"Set parameters into the Booster.\n\n Parameters\n ----------\n params: dict/list/str\n list of key,value pairs, dict of key to value or simply str key\n value: optional\n value of the specified parameter, when params is str key\n \"\"\"\n if isinstance(params, Mapping):\n params = params.items()\n elif isinstance(params, STRING_TYPES) and value is not None:\n params = [(params, value)]\n for key, val in params:\n _check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))\n\n def update(self, dtrain, iteration, fobj=None):\n \"\"\"Update for one iteration, with objective function calculated\n internally. This function should not be called directly by users.\n\n Parameters\n ----------\n dtrain : DMatrix\n Training data.\n iteration : int\n Current iteration number.\n fobj : function\n Customized objective function.\n\n \"\"\"\n if not isinstance(dtrain, DMatrix):\n raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))\n self._validate_features(dtrain)\n\n if fobj is None:\n _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, ctypes.c_int(iteration),\n dtrain.handle))\n else:\n pred = self.predict(dtrain)\n grad, hess = fobj(pred, dtrain)\n self.boost(dtrain, grad, hess)\n\n def boost(self, dtrain, grad, hess):\n \"\"\"Boost the booster for one iteration, with customized gradient\n statistics. Like :func:`xgboost.core.Booster.update`, this\n function should not be called directly by users.\n\n Parameters\n ----------\n dtrain : DMatrix\n The training DMatrix.\n grad : list\n The first order of gradient.\n hess : list\n The second order of gradient.\n\n \"\"\"\n if len(grad) != len(hess):\n raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))\n if not isinstance(dtrain, DMatrix):\n raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))\n self._validate_features(dtrain)\n\n _check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,\n c_array(ctypes.c_float, grad),\n c_array(ctypes.c_float, hess),\n c_bst_ulong(len(grad))))\n\n def eval_set(self, evals, iteration=0, feval=None):\n # pylint: disable=invalid-name\n \"\"\"Evaluate a set of data.\n\n Parameters\n ----------\n evals : list of tuples (DMatrix, string)\n List of items to be evaluated.\n iteration : int\n Current iteration.\n feval : function\n Custom evaluation function.\n\n Returns\n -------\n result: str\n Evaluation result string.\n \"\"\"\n for d in evals:\n if not isinstance(d[0], DMatrix):\n raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))\n if not isinstance(d[1], STRING_TYPES):\n raise TypeError('expected string, got {}'.format(type(d[1]).__name__))\n self._validate_features(d[0])\n\n dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])\n evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])\n msg = ctypes.c_char_p()\n _check_call(_LIB.XGBoosterEvalOneIter(self.handle, ctypes.c_int(iteration),\n dmats, evnames,\n c_bst_ulong(len(evals)),\n ctypes.byref(msg)))\n res = msg.value.decode()\n if feval is not None:\n for dmat, evname in evals:\n feval_ret = feval(self.predict(dmat), dmat)\n if isinstance(feval_ret, 
list):\n for name, val in feval_ret:\n res += '\\t%s-%s:%f' % (evname, name, val)\n else:\n name, val = feval_ret\n res += '\\t%s-%s:%f' % (evname, name, val)\n return res\n\n def eval(self, data, name='eval', iteration=0):\n \"\"\"Evaluate the model on mat.\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n name : str, optional\n The name of the dataset.\n\n iteration : int, optional\n The current iteration number.\n\n Returns\n -------\n result: str\n Evaluation result string.\n \"\"\"\n self._validate_features(data)\n return self.eval_set([(data, name)], iteration)\n\n def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False,\n pred_contribs=False, approx_contribs=False, pred_interactions=False,\n validate_features=True):\n \"\"\"\n Predict with data.\n\n .. note:: This function is not thread safe.\n\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call ``bst.copy()`` to make copies\n of model object and then call ``predict()``.\n\n .. note:: Using ``predict()`` with DART booster\n\n If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only\n some of the trees will be evaluated. This will produce incorrect results if ``data`` is\n not the training data. To obtain correct results on test sets, set ``ntree_limit`` to\n a nonzero value, e.g.\n\n .. code-block:: python\n\n preds = bst.predict(dtest, ntree_limit=num_round)\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n output_margin : bool\n Whether to output the raw untransformed margin value.\n\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n\n pred_leaf : bool\n When this option is on, the output will be a matrix of (nsample, ntrees)\n with each record indicating the predicted leaf index of each sample in each tree.\n Note that the leaf index of a tree is unique per tree, so you may find leaf 1\n in both tree 1 and tree 0.\n\n pred_contribs : bool\n When this is True the output will be a matrix of size (nsample, nfeats + 1)\n with each record indicating the feature contributions (SHAP values) for that\n prediction. The sum of all feature contributions is equal to the raw untransformed\n margin value of the prediction. Note the final column is the bias term.\n\n approx_contribs : bool\n Approximate the contributions of each feature\n\n pred_interactions : bool\n When this is True the output will be a matrix of size (nsample, nfeats + 1, nfeats + 1)\n indicating the SHAP interaction values for each pair of features. The sum of each\n row (or column) of the interaction values equals the corresponding SHAP value (from\n pred_contribs), and the sum of the entire matrix equals the raw untransformed margin\n value of the prediction. 
Note the last row and column correspond to the bias term.\n\n validate_features : bool\n When this is True, validate that the Booster's and data's feature_names are identical.\n Otherwise, it is assumed that the feature_names are the same.\n\n Returns\n -------\n prediction : numpy array\n \"\"\"\n option_mask = 0x00\n if output_margin:\n option_mask |= 0x01\n if pred_leaf:\n option_mask |= 0x02\n if pred_contribs:\n option_mask |= 0x04\n if approx_contribs:\n option_mask |= 0x08\n if pred_interactions:\n option_mask |= 0x10\n\n if validate_features:\n self._validate_features(data)\n\n length = c_bst_ulong()\n preds = ctypes.POINTER(ctypes.c_float)()\n _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,\n ctypes.c_int(option_mask),\n ctypes.c_uint(ntree_limit),\n ctypes.byref(length),\n ctypes.byref(preds)))\n preds = ctypes2numpy(preds, length.value, np.float32)\n if pred_leaf:\n preds = preds.astype(np.int32)\n nrow = data.num_row()\n if preds.size != nrow and preds.size % nrow == 0:\n chunk_size = int(preds.size / nrow)\n\n if pred_interactions:\n ngroup = int(chunk_size / ((data.num_col() + 1) * (data.num_col() + 1)))\n if ngroup == 1:\n preds = preds.reshape(nrow, data.num_col() + 1, data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, ngroup, data.num_col() + 1, data.num_col() + 1)\n elif pred_contribs:\n ngroup = int(chunk_size / (data.num_col() + 1))\n if ngroup == 1:\n preds = preds.reshape(nrow, data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, ngroup, data.num_col() + 1)\n else:\n preds = preds.reshape(nrow, chunk_size)\n return preds\n\n def save_model(self, fname):\n \"\"\"\n Save the model to a file.\n\n The model is saved in an XGBoost internal binary format which is\n universal among the various XGBoost interfaces. Auxiliary attributes of\n the Python Booster object (such as feature_names) will not be saved.\n To preserve all attributes, pickle the Booster object.\n\n Parameters\n ----------\n fname : string\n Output file name\n \"\"\"\n if isinstance(fname, STRING_TYPES): # assume file name\n _check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))\n else:\n raise TypeError(\"fname must be a string\")\n\n def save_raw(self):\n \"\"\"\n Save the model to a in memory buffer representation\n\n Returns\n -------\n a in memory buffer representation of the model\n \"\"\"\n length = c_bst_ulong()\n cptr = ctypes.POINTER(ctypes.c_char)()\n _check_call(_LIB.XGBoosterGetModelRaw(self.handle,\n ctypes.byref(length),\n ctypes.byref(cptr)))\n return ctypes2buffer(cptr, length.value)\n\n def load_model(self, fname):\n \"\"\"\n Load the model from a file.\n\n The model is loaded from an XGBoost internal binary format which is\n universal among the various XGBoost interfaces. 
Auxiliary attributes of\n the Python Booster object (such as feature_names) will not be loaded.\n To preserve all attributes, pickle the Booster object.\n\n Parameters\n ----------\n fname : string or a memory buffer\n Input file name or memory buffer(see also save_raw)\n \"\"\"\n if isinstance(fname, STRING_TYPES):\n # assume file name, cannot use os.path.exist to check, file can be from URL.\n _check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname)))\n else:\n buf = fname\n length = c_bst_ulong(len(buf))\n ptr = (ctypes.c_char * len(buf)).from_buffer(buf)\n _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))\n\n def dump_model(self, fout, fmap='', with_stats=False, dump_format=\"text\"):\n \"\"\"\n Dump model into a text or JSON file.\n\n Parameters\n ----------\n fout : string\n Output file name.\n fmap : string, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump file. Can be 'text' or 'json'.\n \"\"\"\n if isinstance(fout, STRING_TYPES):\n fout = open(fout, 'w')\n need_close = True\n else:\n need_close = False\n ret = self.get_dump(fmap, with_stats, dump_format)\n if dump_format == 'json':\n fout.write('[\\n')\n for i in range(len(ret)):\n fout.write(ret[i])\n if i < len(ret) - 1:\n fout.write(\",\\n\")\n fout.write('\\n]')\n else:\n for i in range(len(ret)):\n fout.write('booster[{}]:\\n'.format(i))\n fout.write(ret[i])\n if need_close:\n fout.close()\n\n def get_dump(self, fmap='', with_stats=False, dump_format=\"text\"):\n \"\"\"\n Returns the model dump as a list of strings.\n\n Parameters\n ----------\n fmap : string, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump. Can be 'text' or 'json'.\n \"\"\"\n length = c_bst_ulong()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n if self.feature_names is not None and fmap == '':\n flen = len(self.feature_names)\n\n fname = from_pystr_to_cstr(self.feature_names)\n\n if self.feature_types is None:\n # use quantitative as default\n # {'q': quantitative, 'i': indicator}\n ftype = from_pystr_to_cstr(['q'] * flen)\n else:\n ftype = from_pystr_to_cstr(self.feature_types)\n _check_call(_LIB.XGBoosterDumpModelExWithFeatures(\n self.handle,\n ctypes.c_int(flen),\n fname,\n ftype,\n ctypes.c_int(with_stats),\n c_str(dump_format),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n else:\n if fmap != '' and not os.path.exists(fmap):\n raise ValueError(\"No such file: {0}\".format(fmap))\n _check_call(_LIB.XGBoosterDumpModelEx(self.handle,\n c_str(fmap),\n ctypes.c_int(with_stats),\n c_str(dump_format),\n ctypes.byref(length),\n ctypes.byref(sarr)))\n res = from_cstr_to_pystr(sarr, length)\n return res\n\n def get_fscore(self, fmap=''):\n \"\"\"Get feature importance of each feature.\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n .. 
note:: Zero-importance features will not be included\n\n Keep in mind that this function does not include zero-importance feature, i.e.\n those features that have not been used in any split conditions.\n\n Parameters\n ----------\n fmap: str (optional)\n The name of feature map file\n \"\"\"\n\n return self.get_score(fmap, importance_type='weight')\n\n def get_score(self, fmap='', importance_type='weight'):\n \"\"\"Get feature importance of each feature.\n Importance type can be defined as:\n\n * 'weight': the number of times a feature is used to split the data across all trees.\n * 'gain': the average gain across all splits the feature is used in.\n * 'cover': the average coverage across all splits the feature is used in.\n * 'total_gain': the total gain across all splits the feature is used in.\n * 'total_cover': the total coverage across all splits the feature is used in.\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str (optional)\n The name of feature map file.\n importance_type: str, default 'weight'\n One of the importance types defined above.\n \"\"\"\n if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:\n raise ValueError('Feature importance is not defined for Booster type {}'\n .format(self.booster))\n\n allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']\n if importance_type not in allowed_importance_types:\n msg = (\"importance_type mismatch, got '{}', expected one of \" +\n repr(allowed_importance_types))\n raise ValueError(msg.format(importance_type))\n\n # if it's weight, then omap stores the number of missing values\n if importance_type == 'weight':\n # do a simpler tree dump to save time\n trees = self.get_dump(fmap, with_stats=False)\n\n fmap = {}\n for tree in trees:\n for line in tree.split('\\n'):\n # look for the opening square bracket\n arr = line.split('[')\n # if no opening bracket (leaf node), ignore this line\n if len(arr) == 1:\n continue\n\n # extract feature name from string between []\n fid = arr[1].split(']')[0].split('<')[0]\n\n if fid not in fmap:\n # if the feature hasn't been seen yet\n fmap[fid] = 1\n else:\n fmap[fid] += 1\n\n return fmap\n\n else:\n average_over_splits = True\n if importance_type == 'total_gain':\n importance_type = 'gain'\n average_over_splits = False\n elif importance_type == 'total_cover':\n importance_type = 'cover'\n average_over_splits = False\n\n trees = self.get_dump(fmap, with_stats=True)\n\n importance_type += '='\n fmap = {}\n gmap = {}\n for tree in trees:\n for line in tree.split('\\n'):\n # look for the opening square bracket\n arr = line.split('[')\n # if no opening bracket (leaf node), ignore this line\n if len(arr) == 1:\n continue\n\n # look for the closing bracket, extract only info within that bracket\n fid = arr[1].split(']')\n\n # extract gain or cover from string after closing bracket\n g = float(fid[1].split(importance_type)[1].split(',')[0])\n\n # extract feature name from string before closing bracket\n fid = fid[0].split('<')[0]\n\n if fid not in fmap:\n # if the feature hasn't been seen yet\n fmap[fid] = 1\n gmap[fid] = g\n else:\n fmap[fid] += 1\n gmap[fid] += g\n\n # calculate average value (gain/cover) for each feature\n if average_over_splits:\n for fid in gmap:\n 
gmap[fid] = gmap[fid] / fmap[fid]\n\n return gmap\n\n def trees_to_dataframe(self, fmap=''):\n \"\"\"Parse a boosted tree model text dump into a pandas DataFrame structure.\n\n This feature is only defined when the decision tree model is chosen as base\n learner (`booster in {gbtree, dart}`). It is not defined for other base learner\n types, such as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str (optional)\n The name of feature map file.\n \"\"\"\n # pylint: disable=too-many-locals\n if not PANDAS_INSTALLED:\n raise Exception(('pandas must be available to use this method.'\n 'Install pandas before calling again.'))\n\n if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:\n raise ValueError('This method is not defined for Booster type {}'\n .format(self.booster))\n\n tree_ids = []\n node_ids = []\n fids = []\n splits = []\n y_directs = []\n n_directs = []\n missings = []\n gains = []\n covers = []\n\n trees = self.get_dump(fmap, with_stats=True)\n for i, tree in enumerate(trees):\n for line in tree.split('\\n'):\n arr = line.split('[')\n # Leaf node\n if len(arr) == 1:\n # Last element of line.split is an empy string\n if arr == ['']:\n continue\n # parse string\n parse = arr[0].split(':')\n stats = re.split('=|,', parse[1])\n\n # append to lists\n tree_ids.append(i)\n node_ids.append(int(re.findall(r'\\b\\d+\\b', parse[0])[0]))\n fids.append('Leaf')\n splits.append(float('NAN'))\n y_directs.append(float('NAN'))\n n_directs.append(float('NAN'))\n missings.append(float('NAN'))\n gains.append(float(stats[1]))\n covers.append(float(stats[3]))\n # Not a Leaf Node\n else:\n # parse string\n fid = arr[1].split(']')\n parse = fid[0].split('<')\n stats = re.split('=|,', fid[1])\n\n # append to lists\n tree_ids.append(i)\n node_ids.append(int(re.findall(r'\\b\\d+\\b', arr[0])[0]))\n fids.append(parse[0])\n splits.append(float(parse[1]))\n str_i = str(i)\n y_directs.append(str_i + '-' + stats[1])\n n_directs.append(str_i + '-' + stats[3])\n missings.append(str_i + '-' + stats[5])\n gains.append(float(stats[7]))\n covers.append(float(stats[9]))\n\n ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]\n df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids,\n 'Feature': fids, 'Split': splits, 'Yes': y_directs,\n 'No': n_directs, 'Missing': missings, 'Gain': gains,\n 'Cover': covers})\n\n if callable(getattr(df, 'sort_values', None)):\n # pylint: disable=no-member\n return df.sort_values(['Tree', 'Node']).reset_index(drop=True)\n # pylint: disable=no-member\n return df.sort(['Tree', 'Node']).reset_index(drop=True)\n\n def _validate_features(self, data):\n \"\"\"\n Validate Booster and data's feature_names are identical.\n Set feature_names and feature_types from DMatrix\n \"\"\"\n if self.feature_names is None:\n self.feature_names = data.feature_names\n self.feature_types = data.feature_types\n else:\n # Booster can't accept data with different feature names\n if self.feature_names != data.feature_names:\n dat_missing = set(self.feature_names) - set(data.feature_names)\n my_missing = set(data.feature_names) - set(self.feature_names)\n\n msg = 'feature_names mismatch: {0} {1}'\n\n if dat_missing:\n msg += ('\\nexpected ' + ', '.join(str(s) for s in dat_missing) +\n ' in input data')\n\n if my_missing:\n msg += ('\\ntraining data did not have the following fields: ' +\n ', '.join(str(s) for s in my_missing))\n\n raise ValueError(msg.format(self.feature_names,\n data.feature_names))\n\n def 
get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True):\n \"\"\"Get split value histogram of a feature\n\n Parameters\n ----------\n feature: str\n The name of the feature.\n fmap: str (optional)\n The name of feature map file.\n bin: int, default None\n The maximum number of bins.\n Number of bins equals number of unique split values n_unique,\n if bins == None or bins > n_unique.\n as_pandas: bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return numpy ndarray.\n\n Returns\n -------\n a histogram of used splitting values for the specified feature\n either as numpy array or pandas DataFrame.\n \"\"\"\n xgdump = self.get_dump(fmap=fmap)\n values = []\n regexp = re.compile(r\"\\[{0}<([\\d.Ee+-]+)\\]\".format(feature))\n for i in range(len(xgdump)):\n m = re.findall(regexp, xgdump[i])\n values.extend(map(float, m))\n\n n_unique = len(np.unique(values))\n bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)\n\n nph = np.histogram(values, bins=bins)\n nph = np.column_stack((nph[1][1:], nph[0]))\n nph = nph[nph[:, 1] > 0]\n\n if as_pandas and PANDAS_INSTALLED:\n return DataFrame(nph, columns=['SplitValue', 'Count'])\n elif as_pandas and not PANDAS_INSTALLED:\n sys.stderr.write(\n \"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).\")\n return nph\n else:\n return nph\n" ]
[ [ "numpy.histogram", "numpy.unique", "numpy.vectorize", "numpy.column_stack", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
imu-hupeng/calc_metric
[ "7053a521987a43d5cef95695b81e2f31df75cfc3" ]
[ "speech_kit/signal_utils.py" ]
[ "import numpy\r\ndef expand2frame(x,n):\r\n r,c = x.shape\r\n y = numpy.zeros((r,c*2*n+c));\r\n t = 0;\r\n for i in range(-n,n+1):\r\n y[:,t*c:t*c+c] = numpy.roll(x,i,0);\r\n t = t + 1;\r\n return y" ]
[ [ "numpy.zeros", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ljuillen/MicroStructPy
[ "fa60e4b05e534fb745f18b2862bcedcdcfb4166e" ]
[ "src/microstructpy/_misc.py" ]
[ "\"\"\"Miscellaneous functions\n\nThis private module contains miscellaneous functions.\n\"\"\"\nimport numpy as np\n\n__author__ = 'Kenneth (Kip) Hart'\n\nkw_solid = {'crystalline', 'granular', 'solid'}\nkw_amorph = {'amorphous', 'glass', 'matrix'}\nkw_void = {'void', 'crack', 'hole'}\n\nori_kws = {'orientation', 'matrix', 'angle', 'angle_deg', 'angle_rad',\n 'rot_seq', 'rot_seq_rad', 'rot_seq_deg'}\ngen_kws = {'material_type', 'fraction', 'shape', 'name', 'color', 'position'}\n\ndemo_needs = {'basalt_circle.xml': ['aphanitic_cdf.csv', 'olivine_cdf.csv'],\n 'from_image.py': ['aluminum_micro.png']}\n\nmpl_plural_kwargs = {'edgecolors', 'facecolors', 'linewidths', 'antialiaseds',\n 'offsets'}\nplt_3d_adj = {\n 'left': 0.4,\n 'right': 1,\n 'bottom': 0,\n 'top': 0.8,\n}\n\n\n# --------------------------------------------------------------------------- #\n# #\n# Convert String to Value (Infer Type) #\n# #\n# --------------------------------------------------------------------------- #\ndef from_str(string):\n \"\"\" Convert string to number\n\n This function takes a string and converts it into a number or a list.\n\n Args:\n string (str): The string.\n\n Returns:\n The value in the string.\n\n \"\"\"\n beg_delims = ('(', '[', '{', '<')\n end_delims = (')', ']', '}', '>')\n\n string = string.strip()\n if any([c in string for c in beg_delims + end_delims]) or ',' in string:\n if string[0] in beg_delims:\n string = string[1:]\n if string[-1] in end_delims:\n string = string[:-1]\n val = []\n n_beg = 0\n n_end = 0\n elem_str = ''\n for char in string:\n if char in beg_delims:\n n_beg += 1\n elif char in end_delims:\n n_end += 1\n\n if (char == ',') and n_beg == n_end:\n val.append(from_str(elem_str.strip()))\n elem_str = ''\n else:\n elem_str += char\n val.append(from_str(elem_str.strip()))\n return val\n else:\n try:\n val = int(string)\n except ValueError:\n try:\n val = float(string)\n except ValueError:\n if string.lower() in ('true', 'yes'):\n val = True\n elif string.lower() in ('false', 'no'):\n val = False\n else:\n val = str(string)\n return val\n\n\n# --------------------------------------------------------------------------- #\n# #\n# Tangent Spheres #\n# #\n# --------------------------------------------------------------------------- #\ndef tangent_sphere(points, radii=None, simplices=None):\n \"\"\"Calculate center and radius of tangent sphere(s)\n\n This function computes the center and radius of an n-dimensional sphere\n that is tangent to (n+1) spheres. For example, in 2D this function\n computes the center and radius of a circle tangent to three other circles.\n\n The operation of this function can be vectorized using the ``simplices``\n input. The simplices should be an Mx(n+1) list of indices of the points.\n The result is an Mx(n+1) numpy array, where the first n columns are the\n coordinates of the sphere center. The final column is the radius of the\n sphere.\n\n If no radii are specified, the results are circumspheres of the simplices\n (circumcircles in 2D).\n\n Radii at each point can be speficied. If no radii are given, then the\n results are circumspheres of the simplices (circumcircles in 2D).\n\n Args:\n points (list, tuple, numpy.ndarray): List of points.\n radii (list, tuple, numpy.ndarray): List of radii. *(optional)*\n simplices (list, tuple, numpy.ndarray): List of simplices. 
*(optional)*\n\n Returns:\n numpy.ndarray: The centers and radii of tangent spheres.\n\n \"\"\"\n # set radii\n if radii is None:\n radii = np.full(len(points), 0)\n\n # extract points\n if simplices is None:\n simplices = np.arange(len(points)).reshape(1, -1)\n\n pts = np.array(points)[simplices]\n rs = np.array(radii)[simplices]\n\n # define circle distances\n cs = np.sum(pts * pts, axis=-1) - rs * rs\n\n # matrix and vector quantities\n pos1 = pts[:, 0]\n r1 = rs[:, 0]\n A = pts[:, 1:] - pos1[:, np.newaxis, :]\n b = -1 * (rs[:, 1:] - r1[:, np.newaxis])\n c = 0.5 * (cs[:, 1:] - cs[:, 0, np.newaxis])\n\n # linear system coefficients\n alpha = np.linalg.solve(A, b)\n beta = np.linalg.solve(A, c)\n\n # quadratic equation in rc\n r_beta = beta - pos1\n C1 = np.sum(alpha * alpha, axis=-1) - 1\n C2 = np.sum(r_beta * alpha, axis=-1) - r1\n C3 = np.sum(r_beta * r_beta, axis=-1) - r1 * r1\n\n # solve for rc\n discr = C2 * C2 - C1 * C3\n rt_discr = np.sqrt(discr)\n rt_discr[discr < 0] = 0\n\n rc1 = (-C2 + rt_discr) / C1\n rc2 = (-C2 - rt_discr) / C1\n\n mask = np.abs(rc1) < np.abs(rc2)\n rc = rc2\n rc[mask] = rc1[mask]\n rc[discr < 0] = 0\n\n # solve for center position\n posc = alpha * rc[:, np.newaxis] + beta\n\n # return results\n spheres = np.hstack((posc, rc.reshape(-1, 1)))\n return np.squeeze(spheres)\n\n\ndef axisEqual3D(ax):\n '''From stackoverflow: https://stackoverflow.com/a/19248731'''\n extents = np.array([getattr(ax, 'get_{}lim'.format(d))() for d in 'xyz'])\n sz = extents[:, 1] - extents[:, 0]\n centers = np.mean(extents, axis=1)\n maxsize = max(abs(sz))\n r = maxsize/2\n for ctr, dim in zip(centers, 'xyz'):\n getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)\n\n\ndef ax_objects(ax):\n n = 0\n for att in ['collections', 'images', 'lines', 'patches', 'texts']:\n n += len(getattr(ax, att))\n return n\n" ]
[ [ "numpy.linalg.solve", "numpy.sqrt", "numpy.abs", "numpy.squeeze", "numpy.mean", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Krl1/Flappy-Bird-AI
[ "cd581107763763175866f66a89ed0a35cdf24595" ]
[ "game_functions.py" ]
[ "import sys\nimport pygame\nimport numpy as np\nfrom pipeTop import PipeTop\nfrom pipeDown import PipeDown\n\ndef check_events(ai_settings, screen, bird):\n\t\"\"\"Reakcja na zdarzenia generowane przez klawiaturę\"\"\"\n\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tcheck_keydown_events(ai_settings, event, bird)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tcheck_keyup_events(ai_settings, event, bird)\n\n\ndef check_keydown_events(ai_settings, event, bird):\n\t\"\"\"Reakcja na naciśnięcie klawisza.\"\"\"\n\tif event.key == pygame.K_SPACE:\n\t\tif not ai_settings.pressed_space:\n\t\t\tbird.steps_to_move_to_top += ai_settings.bird_jump\n\t\t\tai_settings.pressed_space = True\n\ndef check_keyup_events(ai_settings, event, bird):\n\t\"\"\"Reakcja na zwolnienie klawisza.\"\"\"\n\tif event.key == pygame.K_SPACE:\n\t\tai_settings.pressed_space = False\n\ndef update_screen(ai_settings, screen, bird, pipesTop, pipesDown):\n\t\"\"\"Uaktualnienie obrazów na ekranie i przejście do nowego ekranu.\"\"\"\n\t# Odświeżenie ekranu w trakcie każdej iteracji pętli.\n\t# screen.fill(ai_settings.bg_color)\n\tscreen.blit(ai_settings.background_image, [0, 0])\n\tbird.blitme()\n\tfor pipe in pipesTop.sprites():\n\t\tpipe.blitme()\n\n\tfor pipe in pipesDown.sprites():\n\t\tpipe.blitme()\n\n\t# Wyświetlenie ostatio zmodyfikowanego ekranu.\n\tpygame.display.flip()\n\ndef check_pipes(ai_settings, screen, pipesTop, pipesDown):\n\tcheck_new_pipe = True\n\tfor pipe in pipesTop.copy():\n\t\tif pipe.rect.right <= 0:\n\t\t\tpipesTop.remove(pipe)\n\t\telif pipe.rect.right > ai_settings.screen_width:\n\t\t\tcheck_new_pipe = False\n\n\n\tfor pipe in pipesDown.copy():\n\t\tif pipe.rect.right <= 0:\n\t\t\tpipesDown.remove(pipe)\n\n\tif check_new_pipe:\n\t\trandom_int = np.random.randint(ai_settings.screen_height/3, 2*ai_settings.screen_height/3)\n\t\tnew_pipe_top = PipeTop(ai_settings, screen, random_int)\n\t\tnew_pipe_down = PipeDown(ai_settings, screen, random_int)\n\t\tpipesTop.add(new_pipe_top)\n\t\tpipesDown.add(new_pipe_down)\n\n\ndef check_collide(pipesDown,pipesTop,bird):\n\tfor pipe in pipesTop:\n\t\tis_collided = pygame.sprite.collide_rect(pipe, bird)\n\t\tif is_collided>0:\n\t\t\treturn False\n\t\t\t\n\tfor pipe in pipesDown:\n\t\tis_collided = pygame.sprite.collide_rect(pipe, bird)\n\t\tif is_collided>0:\n\t\t\treturn False\n\n\treturn True\n\t\t " ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kupparsudhir/kaggle
[ "29966f1277067022e00c568e7576a3b94f295383" ]
[ "zillow-price/zestimate_linear_model.py" ]
[ "\"\"\"\nCreated on Mon Sep 18 15:49:13 2017\n\n@author: sudhir\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy as sy\n\n# Import data set\ntrain = pd.read_csv(\"../input/train_2017.csv\")\nproperties = pd.read_csv(\"../input/properties_2017.csv\", low_memory=False)\nsample = pd.read_csv(\"../input/sample_submission.csv\")\n\nfor c, dtype in zip(properties.columns, properties.dtypes):\n if dtype == np.float64:\n properties[c] = properties[c].astype(np.float32)\ntrain.head()\nproperties.head()\n\ntrain_df = pd.merge(train, properties, on=\"parcelid\", how=\"left\")\ntrain_df.info()\ntrain_df.fillna(0)\n\n# traget variable\nsns.distplot((train[\"logerror\"]))\nplt.show()\nfor c in train_df.dtypes[train_df.dtypes == object].index.values:\n train_df[c] = train_df[c] == True\n#\n\nX = train_df.drop(\n [\n \"parcelid\",\n \"logerror\",\n \"transactiondate\",\n \"propertyzoningdesc\",\n \"propertycountylandusecode\",\n ],\n axis=1,\n)\ny = train_df[\"logerror\"].values.astype(np.float64)\n\nprint(\"training linear regression model\", \"--\" * 10)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\nlm = LinearRegression()\nx_train, x_test, y_train, y_test = train_test_split(\n X, y, test_size=0.7, random_state=100\n)\ntrain_columns = x_train.columns\n\nlm.fit(x_train.fillna(0), y_train)\nprd = lm.predict(x_test.fillna(0))\n\n# print('R2:',lm.score(x_test,y_test))\nsample[\"parcelid\"] = sample[\"ParcelId\"]\ndf_test = sample.merge(properties, on=\"parcelid\", how=\"left\")\n\nx_test = df_test[train_columns]\nfor c in x_test.dtypes[x_test.dtypes == object].index.values:\n x_test[c] = x_test[c] == True\nprd = lm.predict(x_test.fillna(0))\n\nsub = pd.read_csv(\"../input/sample_submission.csv\")\nfor c in sub.columns[sub.columns != \"ParcelId\"]:\n sub[c] = prd\n\nprint(\"Writing csv ...\")\nsub.to_csv(\"linear_model.csv\", index=False, float_format=\"%.4f\")\n" ]
[ [ "pandas.merge", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
surajpaib/swa_gaussian
[ "c4e837fc6269403032950f25d69ded7fe67729a0" ]
[ "swag/utils.py" ]
[ "import itertools\nimport torch\nimport os\nimport copy\nfrom datetime import datetime\nimport math\nimport numpy as np\nimport tqdm\n\nimport torch.nn.functional as F\n\n\ndef flatten(lst):\n tmp = [i.contiguous().view(-1, 1) for i in lst]\n return torch.cat(tmp).view(-1)\n\n\ndef unflatten_like(vector, likeTensorList):\n # Takes a flat torch.tensor and unflattens it to a list of torch.tensors\n # shaped like likeTensorList\n outList = []\n i = 0\n for tensor in likeTensorList:\n # n = module._parameters[name].numel()\n n = tensor.numel()\n outList.append(vector[:, i : i + n].view(tensor.shape))\n i += n\n return outList\n\n\ndef LogSumExp(x, dim=0):\n m, _ = torch.max(x, dim=dim, keepdim=True)\n return m + torch.log((x - m).exp().sum(dim=dim, keepdim=True))\n\n\ndef adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n return lr\n\n\ndef save_checkpoint(dir, epoch, name=\"checkpoint\", **kwargs):\n state = {\"epoch\": epoch}\n state.update(kwargs)\n filepath = os.path.join(dir, \"%s-%d.pt\" % (name, epoch))\n torch.save(state, filepath)\n\n\ndef train_epoch(\n loader,\n model,\n criterion,\n optimizer,\n cuda=True,\n regression=False,\n verbose=False,\n subset=None,\n):\n loss_sum = 0.0\n correct = 0.0\n verb_stage = 0\n\n num_objects_current = 0\n num_batches = len(loader)\n\n model.train()\n\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for i, (input, target) in enumerate(loader):\n if cuda:\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n loss, output = criterion(model, input, target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_sum += loss.data.item() * input.size(0)\n\n if not regression:\n pred = output.data.argmax(1, keepdim=True)\n correct += pred.eq(target.data.view_as(pred)).sum().item()\n\n num_objects_current += input.size(0)\n\n if verbose and 10 * (i + 1) / num_batches >= verb_stage + 1:\n print(\n \"Stage %d/10. Loss: %12.4f. 
Acc: %6.2f\"\n % (\n verb_stage + 1,\n loss_sum / num_objects_current,\n correct / num_objects_current * 100.0,\n )\n )\n verb_stage += 1\n\n return {\n \"loss\": loss_sum / num_objects_current,\n \"accuracy\": None if regression else correct / num_objects_current * 100.0,\n }\n\n\n\ndef eval(loader, model, criterion, cuda=True, regression=False, verbose=False):\n loss_sum = 0.0\n correct = 0.0\n num_objects_total = len(loader.dataset)\n\n model.eval()\n\n with torch.no_grad():\n if verbose:\n loader = tqdm.tqdm(loader)\n for i, (input, target) in enumerate(loader):\n if cuda:\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n loss, output = criterion(model, input, target)\n\n loss_sum += loss.item() * input.size(0)\n\n if not regression:\n pred = output.data.argmax(1, keepdim=True)\n correct += pred.eq(target.data.view_as(pred)).sum().item()\n\n return {\n \"loss\": loss_sum / num_objects_total,\n \"accuracy\": None if regression else correct / num_objects_total * 100.0,\n }\n\n\ndef predict(loader, model, verbose=False):\n predictions = list()\n targets = list()\n\n model.eval()\n\n if verbose:\n loader = tqdm.tqdm(loader)\n\n offset = 0\n with torch.no_grad():\n for input, target in loader:\n input = input.cuda(non_blocking=True)\n output = model(input)\n\n batch_size = input.size(0)\n predictions.append(F.softmax(output, dim=1).cpu().numpy())\n targets.append(target.numpy())\n offset += batch_size\n\n return {\"predictions\": np.vstack(predictions), \"targets\": np.concatenate(targets)}\n\n\ndef moving_average(net1, net2, alpha=1):\n for param1, param2 in zip(net1.parameters(), net2.parameters()):\n param1.data *= 1.0 - alpha\n param1.data += param2.data * alpha\n\n\ndef _check_bn(module, flag):\n if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):\n flag[0] = True\n\n\ndef check_bn(model):\n flag = [False]\n model.apply(lambda module: _check_bn(module, flag))\n return flag[0]\n\n\ndef reset_bn(module):\n if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):\n module.running_mean = torch.zeros_like(module.running_mean)\n module.running_var = torch.ones_like(module.running_var)\n\n\ndef _get_momenta(module, momenta):\n if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):\n momenta[module] = module.momentum\n\n\ndef _set_momenta(module, momenta):\n if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):\n module.momentum = momenta[module]\n\n\ndef bn_update(loader, model, verbose=False, subset=None, **kwargs):\n \"\"\"\n BatchNorm buffers update (if any).\n Performs 1 epochs to estimate buffers average using train dataset.\n\n :param loader: train dataset loader for buffers average estimation.\n :param model: model being update\n :return: None\n \"\"\"\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n\n\ndef 
inv_softmax(x, eps=1e-10):\n return torch.log(x / (1.0 - x + eps))\n\n\ndef predictions(test_loader, model, seed=None, cuda=True, regression=False, **kwargs):\n # will assume that model is already in eval mode\n # model.eval()\n preds = []\n targets = []\n for input, target in test_loader:\n if seed is not None:\n torch.manual_seed(seed)\n if cuda:\n input = input.cuda(non_blocking=True)\n output = model(input, **kwargs)\n if regression:\n preds.append(output.cpu().data.numpy())\n else:\n probs = F.softmax(output, dim=1)\n preds.append(probs.cpu().data.numpy())\n targets.append(target.numpy())\n return np.vstack(preds), np.concatenate(targets)\n\n\ndef schedule(epoch, lr_init, epochs, swa, swa_start=None, swa_lr=None):\n t = (epoch) / (swa_start if swa else epochs)\n lr_ratio = swa_lr / lr_init if swa else 0.01\n if t <= 0.5:\n factor = 1.0\n elif t <= 0.9:\n factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4\n else:\n factor = lr_ratio\n return lr_init * factor\n" ]
[ [ "torch.nn.functional.softmax", "torch.max", "torch.cat", "torch.manual_seed", "torch.zeros_like", "numpy.concatenate", "torch.autograd.Variable", "torch.no_grad", "torch.log", "torch.ones_like", "numpy.vstack", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
agcapps/conduit
[ "feb58d64ed3725a478f827a8fdcf02514e7711cb" ]
[ "src/tests/conduit/python/t_python_conduit_node.py" ]
[ "###############################################################################\n# Copyright (c) 2014-2018, Lawrence Livermore National Security, LLC.\n# \n# Produced at the Lawrence Livermore National Laboratory\n# \n# LLNL-CODE-666778\n# \n# All rights reserved.\n# \n# This file is part of Conduit. \n# \n# For details, see: http://software.llnl.gov/conduit/.\n# \n# Please also read conduit/LICENSE\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the disclaimer below.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the disclaimer (as noted below) in the\n# documentation and/or other materials provided with the distribution.\n# \n# * Neither the name of the LLNS/LLNL nor the names of its contributors may\n# be used to endorse or promote products derived from this software without\n# specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,\n# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, \n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE \n# POSSIBILITY OF SUCH DAMAGE.\n# \n###############################################################################\n\"\"\"\n file: t_python_conduit_node.py\n description: Unit tests for conduit::Node python module interface.\n\n\"\"\"\n\nimport sys\nimport unittest\n\nfrom conduit import Node\n\nimport numpy as np\n\n\nclass Test_Conduit_Node(unittest.TestCase):\n def test_simple(self):\n a_val = np.uint32(10)\n b_val = np.uint32(20)\n c_val = np.float64(30.0)\n\n n = Node()\n n['a'] = a_val\n n['b'] = b_val\n n['c'] = c_val\n \n self.assertTrue(n['a'] == a_val)\n self.assertTrue(n['b'] == b_val)\n self.assertTrue(n['c'] == c_val)\n\n def test_nested(self):\n val = np.uint32(10)\n n = Node()\n n['a']['b'] = val\n print(n['a']['b'])\n self.assertEqual(n['a']['b'],val)\n\n def test_vector(self):\n vec = np.array(range(100), np.uint32)\n n = Node()\n n['a'] = vec\n self.assertEqual(n['a'][99], 99)\n\n def test_fetch(self):\n vec = np.array(range(100), np.uint32)\n n = Node()\n n['a'] = vec\n na = n.fetch('a')\n na_val = na.value()\n self.assertEqual(na_val[99], 99)\n \n def test_child(self):\n vec = np.array(range(100), np.uint32)\n n = Node()\n n['a'] = vec\n na = n.child(0)\n na_val = na.value()\n self.assertEqual(na_val[99], 99)\n n['b'] = vec\n self.assertEqual(n.number_of_children(),2)\n \n def test_save_load(self):\n # on windows, this breaks at 27 !?\n alen = 26\n vec = np.array(range(alen), np.uint32)\n n = Node()\n n['a'] = vec\n print(n)\n n.save(\"test_pyconduit_node_save_load.conduit_bin\")\n nl = Node()\n nl.load(\"test_pyconduit_node_save_load.conduit_bin\")\n print(nl)\n 
self.assertEqual(nl['a'][alen-1], alen-1)\n \n n.save(\"test_pyconduit_node_json_save_load.json\",protocol=\"json\")\n nl = Node()\n nl.load(\"test_pyconduit_node_json_save_load.json\", protocol=\"json\")\n print(nl)\n self.assertEqual(nl['a'][alen-1], alen-1)\n \n n.save(\"test_pyconduit_node_base64_json_save_load.conduit_base64_json\", protocol=\"conduit_base64_json\")\n nl = Node()\n nl.load(\"test_pyconduit_node_base64_json_save_load.conduit_base64_json\", protocol=\"conduit_base64_json\")\n print(nl)\n self.assertEqual(nl['a'][alen-1], alen-1)\n\n n.save(\"test_pyconduit_node_json_save_load.yaml\",protocol=\"yaml\")\n nl = Node()\n nl.load(\"test_pyconduit_node_json_save_load.yaml\", protocol=\"yaml\")\n print(nl)\n self.assertEqual(nl['a'][alen-1], alen-1)\n\n def test_parse(self):\n n = Node()\n n.parse('{\"a\": 42.0}',\"json\")\n self.assertTrue(n['a'] == np.float64(42.0))\n n.parse('a: 52.0',\"yaml\")\n self.assertTrue(n['a'] == np.float64(52.0))\n\n def test_parent(self):\n vec = np.array(range(100), np.uint32)\n n = Node()\n n['a'] = vec\n na = n.fetch('a')\n self.assertFalse(na.is_root())\n # todo: test parent()\n\n def test_total_bytes(self):\n vec = np.array(range(100), np.uint32)\n n = Node()\n n['a'] = vec\n self.assertEqual(n.total_strided_bytes(),4 * 100)\n self.assertEqual(n.total_bytes_compact(),4 * 100)\n # TODO: check if n.is_compact() should pass as well?\n # it doesn't currently\n self.assertTrue(n.fetch('a').is_compact())\n\n def test_paths(self):\n n = Node()\n n['a'] = 1\n n['b'] = 2\n n['c'] = 3\n for v in ['a','b','c']:\n self.assertTrue(n.has_path(v))\n paths = n.child_names()\n for v in ['a','b','c']:\n self.assertTrue(v in paths)\n\n def test_list(self):\n n = Node()\n n.append().set(1)\n self.assertTrue(n.child(0).value(),1)\n self.assertTrue(n[0],1)\n n2 = Node()\n n2_c = n2.append()\n n2_c.set(2)\n self.assertEqual(n2.child(0).value(),2)\n\n n3 = Node()\n n3.fetch(\"here\").append().set(\"a\")\n n3.fetch(\"here\").append().set(\"b\")\n self.assertTrue(n3.fetch(\"here\").child(0).value(),\"a\")\n self.assertTrue(n3.fetch(\"here\").child(1).value(),\"b\")\n\n n4 = Node()\n n4[\"here\"].append().set(\"a\")\n n5 = n4[\"here\"]\n n5.append().set(\"b\")\n self.assertTrue(n4[\"here\"].child(0).value(),\"a\")\n self.assertTrue(n4[\"here\"].child(1).value(),\"b\")\n self.assertTrue(n4[\"here\"][0],\"a\")\n self.assertTrue(n4[\"here\"][1],\"b\")\n\n\n \n def test_remove(self):\n n = Node()\n n['a'] = 1\n n['b'] = 2\n n['c'] = 3\n self.assertEqual(n.number_of_children(),3)\n n.remove(path='c')\n self.assertEqual(n.number_of_children(),2)\n paths = n.child_names()\n for v in ['a','b']:\n self.assertTrue(v in paths)\n n.remove(index=0)\n paths = n.child_names()\n for v in ['b']:\n self.assertTrue(v in paths)\n\n def test_info(self):\n n = Node()\n n['a'] = 1\n n['b'] = 2\n n['c'] = 3\n ni = n.info();\n #print ni\n self.assertEqual(ni[\"total_strided_bytes\"],n.total_strided_bytes())\n\n def test_set_all_types(self):\n types = [ 'int8', 'int16', 'int32', 'int64',\n 'uint8', 'uint16', 'uint32', 'uint64',\n 'float32', 'float64']\n for type in types:\n data = np.array(range(10), dtype=type)\n n = Node()\n n.set(data)\n for i in range(len(data)):\n self.assertEqual(n.value()[i], data[i])\n\n def test_set_external(self):\n types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']\n for type in types:\n ext_data = np.array(range(10), dtype=type)\n n = Node()\n n.set_external(ext_data)\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], 
ext_data[i])\n ext_data[5] = 11\n n.value()[8] = 77\n n.value()[2] = 8\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], ext_data[i])\n\n def test_set_external_node(self):\n n = Node()\n n.set(np.array(range(10), np.int32))\n n2 = Node()\n # test set external with node\n n2.set_external(n)\n for i in range(10):\n self.assertEqual(n.value()[i], n2.value()[i])\n n.value()[2] = 8\n n.value()[8] = 77\n # set of n should reflect in n2 with set_external\n self.assertEqual(8, n2.value()[2])\n self.assertEqual(77, n2.value()[8])\n\n def test_set_external_basic_slice(self):\n types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']\n for type in types:\n base_data = np.array(range(20), dtype=type)\n ext_data = base_data[1:16]\n n = Node()\n n.set_external(ext_data)\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], ext_data[i])\n ext_data[5] = 11\n n.value()[6] = 77\n n.value()[2] = 8\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], ext_data[i])\n\n def test_set_external_basic_strides(self):\n types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']\n for type in types:\n base_data = np.array(range(20), dtype=type)\n ext_data = base_data[1:16:2]\n n = Node()\n n.set_external(ext_data)\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], ext_data[i])\n ext_data[5] = 11\n n.value()[6] = 77\n n.value()[2] = 8\n for i in range(len(ext_data)):\n self.assertEqual(n.value()[i], ext_data[i])\n\n def test_diff(self):\n n1 = Node()\n n2 = Node()\n info = Node()\n n1['a'] = 1\n self.assertTrue(n1.diff(n2,info))\n print(info)\n n2['a'] = 1\n self.assertFalse(n1.diff(n2,info))\n\n \n n2['b'] = 2.0\n self.assertTrue(n1.diff(n2,info))\n self.assertFalse(n1.diff_compatible(n2,info))\n n1['b'] = 1.0\n self.assertFalse(n1.diff(n2,info,10))\n\n def test_list_of_ints(self):\n # also covered by test_set_all_types\n # but this was the reproducer for \n # https://github.com/LLNL/conduit/issues/281\n n = Node()\n a = np.array(list((1,2,3)))\n n['a'] = a\n self.assertEqual(n['a'][0], 1)\n self.assertEqual(n['a'][1], 2)\n self.assertEqual(n['a'][2], 3)\n\n def test_compact_to(self):\n n = Node()\n n['a'] = 1\n n['b'] = 2\n n['c'] = 3\n ni = n.info()\n self.assertEqual(ni[\"mem_spaces\"].number_of_children(), 3)\n \n n2 = Node()\n n.compact_to(n2)\n ni = n2.info()\n print(ni)\n self.assertEqual(ni[\"mem_spaces\"].number_of_children(), 1)\n\n def test_update(self):\n n = Node()\n data = np.array(range(10), dtype='float64')\n n[\"data\"].set_external(data)\n \n print(n)\n \n n2 = Node()\n n2.update(n)\n print(n2)\n self.assertEqual(n2[\"data\"][0],0)\n \n n3 = Node()\n n3.update_external(n)\n data[0] = 10\n print(n3)\n self.assertEqual(n3[\"data\"][0],10)\n \n n4 = Node()\n n4[\"data\"] = 10\n n4.update_compatible(n)\n print(n4)\n self.assertEqual(n4[\"data\"],10)\n\n\n def test_reset(self):\n n = Node()\n data = np.array(range(10), dtype='float64')\n n[\"data\"].set_external(data)\n \n print(n)\n \n n.reset()\n self.assertEqual(n.number_of_children(), 0)\n\n def test_child_rename(self):\n a_val = np.uint32(10)\n b_val = np.uint32(20)\n\n n = Node()\n with self.assertRaises(Exception):\n n.rename_child('a','b')\n\n n['a'] = a_val\n n['b'] = b_val\n\n with self.assertRaises(Exception):\n n.rename_child('bad','good')\n\n with self.assertRaises(Exception):\n n.rename_child('b','a')\n\n self.assertTrue(n['a'] == a_val)\n self.assertTrue(n['b'] == b_val)\n \n n.rename_child('b','c')\n \n self.assertTrue(n['a'] == a_val)\n self.assertTrue(n['c'] 
== b_val)\n\n def test_string(self):\n n = Node();\n n.set(\"my string!\")\n print(n)\n self.assertEqual(n.value(),\"my string!\")\n # test numpy string\n nps = np.string_(\"my numpy string!\")\n n.set(nps)\n print(n)\n print(repr(n))\n self.assertEqual(n.value(),\"my numpy string!\")\n aofstrs = np.array([\"here\",\"are\",\"a\",\"few\",\"strings\"])\n print(aofstrs)\n n.set(aofstrs)\n print(n)\n self.assertEqual(n[0],\"here\")\n self.assertEqual(n[1],\"are\")\n self.assertEqual(n[2],\"a\")\n self.assertEqual(n[3],\"few\")\n self.assertEqual(n[4],\"strings\")\n\n def test_numeric_tuples(self):\n n = Node()\n n[\"tuple_0\"].set((1, 2, 3, 4))\n n[\"tuple_1\"].set((1.0, 2.0, 3.0, 4.0))\n n[\"tuple_2\"].set((1, 2, 3, 4.0))\n print(n)\n self.assertEqual(n['tuple_0'][0], 1)\n self.assertEqual(n['tuple_0'][1], 2)\n self.assertEqual(n['tuple_0'][2], 3)\n self.assertEqual(n['tuple_0'][3], 4)\n \n self.assertEqual(n['tuple_1'][0], 1.0)\n self.assertEqual(n['tuple_1'][1], 2.0)\n self.assertEqual(n['tuple_1'][2], 3.0)\n self.assertEqual(n['tuple_1'][3], 4.0)\n \n self.assertEqual(n['tuple_2'][0], 1.0)\n self.assertEqual(n['tuple_2'][1], 2.0)\n self.assertEqual(n['tuple_2'][2], 3.0)\n self.assertEqual(n['tuple_2'][3], 4.0)\n\n\n def test_numeric_lists(self):\n n = Node()\n n[\"list_0\"].set((1, 2, 3, 4))\n n[\"list_1\"].set((1.0, 2.0, 3.0, 4.0))\n n[\"list_2\"].set((1, 2, 3, 4.0))\n print(n)\n self.assertEqual(n['list_0'][0], 1)\n self.assertEqual(n['list_0'][1], 2)\n self.assertEqual(n['list_0'][2], 3)\n self.assertEqual(n['list_0'][3], 4)\n \n self.assertEqual(n['list_1'][0], 1.0)\n self.assertEqual(n['list_1'][1], 2.0)\n self.assertEqual(n['list_1'][2], 3.0)\n self.assertEqual(n['list_1'][3], 4.0)\n \n self.assertEqual(n['list_2'][0], 1.0)\n self.assertEqual(n['list_2'][1], 2.0)\n self.assertEqual(n['list_2'][2], 3.0)\n self.assertEqual(n['list_2'][3], 4.0)\n\n def test_general_tuples(self):\n n = Node()\n n.set((1, \"here\"))\n print(n)\n self.assertEqual(n[0], 1.0)\n self.assertEqual(n[1], \"here\")\n\n def test_general_lists(self):\n n = Node()\n n.set([1, \"there\"])\n print(n)\n self.assertEqual(n[0], 1.0)\n self.assertEqual(n[1], \"there\")\n \n\nif __name__ == '__main__':\n unittest.main()\n\n\n" ]
[ [ "numpy.array", "numpy.string_", "numpy.float64", "numpy.uint32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
popesculuca00/Self-driving-car
[ "673e03b2ae046fde7e923ed3377dc3a4c0fb742e" ]
[ "utils.py" ]
[ "import os\r\nimport torch\r\n\r\n\r\ndef get_batch_mask(commands): # shape ( commands, batch_size, params ) == (4 , batch_size, 3) \r\n mask = torch.zeros(4, len(commands), 3)\r\n for i in range( len(commands) ):\r\n mask[ commands[i], i, :] = torch.ones(3)\r\n return mask\r\n\r\ndef save_model(model, epoch, path=None, optimizer=None):\r\n model.eval()\r\n torch.save(model.state_dict(), os.path.join(path, f\"model_epoch_{epoch}.pth\"))\r\n if optimizer:\r\n torch.save(optimizer.state_dict(), os.path.join(path, f\"optimizer_epoch_{epoch}.pth\"))\r\n model.train()\r\n\r\ndef load_model(model, model_path, optimizer_path=None):\r\n \"\"\"\r\n Loads model and optimizer (if given) from state_dict\r\n \"\"\"\r\n model.load_state_dict( torch.load(model_path))\r\n if optimizer_path:\r\n optim = torch.load(optimizer_path)\r\n return model, optim\r\n return model\r\n\r\ndef jit_compile_model(model):\r\n with torch.jit.optimized_execution(True):\r\n jitted_model = torch.jit.script(model)\r\n return jitted_model\r\n\r\n" ]
[ [ "torch.jit.script", "torch.jit.optimized_execution", "torch.ones", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shijun18/Spine_Seg
[ "90c41d8ee08235c43bd3a5236da5a0ee7066fced" ]
[ "post_process.py" ]
[ "import sys\nimport os\nimport SimpleITK as sitk\nimport numpy as np\nfrom tqdm import tqdm\n\n\n\ndef post_process(label,n):\n from skimage.morphology import remove_small_objects\n\n final= np.zeros_like(label,dtype=np.uint8)\n for i in range(1,n):\n roi = (label == i).astype(np.bool)\n roi = remove_small_objects(roi,min_size=64, connectivity=1,in_place=False)\n final[roi == 1] = i\n return final\n\n\n# result_path = './result/tmp/fold5'\n# result_path = './result/Spine/v1-2-4.1-all/All/fusion/'\nresult_path = './result/Spine/final/fusion/'\n# save_folder = './result/tmp/post_fold5'\n# save_folder = './result/Spine/v1-2-4.1-all/All/post_fusion/'\nsave_folder = './result/Spine/final/post_fusion/'\n\n\nif not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n\nfor item in tqdm(os.scandir(result_path)):\n data = sitk.ReadImage(item.path)\n label = sitk.GetArrayFromImage(data).astype(np.uint8)\n \n spacing = data.GetSpacing()\n origin = data.GetOrigin()\n direction = data.GetDirection()\n\n for i in range(label.shape[0]):\n label[i] = post_process(label[i],20)\n\n sitk_data = sitk.GetImageFromArray(label)\n sitk_data.SetSpacing(spacing)\n sitk_data.SetOrigin(origin)\n sitk_data.SetDirection(direction)\n \n save_path = os.path.join(save_folder,item.name)\n sitk.WriteImage(sitk_data, save_path)\n" ]
[ [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nilearn/nilearn_sandbox
[ "a41b81c0bb590f1d0e3073be81278007792a1345" ]
[ "examples/bids_automatic_decoding/bids_decoding.py" ]
[ "from os.path import isdir\nfrom os.path import join as opj\nimport copy\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom pandas import read_csv\nimport nibabel as nib\nfrom nistats.design_matrix import make_first_level_design_matrix\nfrom nistats.first_level_model import FirstLevelModel\nfrom nilearn.decoding import Decoder\nfrom sklearn.utils import Bunch\nfrom joblib import Parallel, delayed\n\n\ndef demo_datasets(dataset):\n '''only intended for API design. Tested only for provided datasets (on drago)\n with fmriprep folder specification. It should be replaced by automatic\n bids fetching tool'''\n\n if dataset == \"ds000105\":\n t_r = 2.5\n subjects = [\"{}\".format(x) for x in range(1, 5)]\n task = \"objectviewing\"\n n_runs = 12\n conditions = ['scissors', 'face', 'cat', 'shoe',\n 'house', 'scrambledpix', 'bottle', 'chair']\n\n data_dir = \"/storage/store/data/OpenNeuro/{}/\".format(dataset)\n source_dir = opj(data_dir, \"{}_R2.0.2/uncompressed/\".format(dataset))\n\n elif dataset == \"ds000107\":\n # Betamaps on blocks, several per subject per condition. Use it\n t_r = 3.0\n subjects = [\"{:0>2d}\".format(x) for x in range(1, 5)]\n task = \"onebacktask\"\n n_runs = 2\n conditions = ['Words', 'Objects',\n 'Scrambled objects', 'Consonant strings']\n\n data_dir = \"/storage/store/data/OpenNeuro/{}/\".format(dataset)\n source_dir = opj(data_dir, \"{}_R2.0.2/uncompressed/\".format(dataset))\n\n elif dataset == \"ds000117\":\n t_r = 2.0\n subjects = [\"{:0>2d}\".format(x) for x in range(1, 5)]\n task = \"facerecognition\"\n n_runs = 9\n conditions = [\"FAMOUS\", \"UNFAMILIAR\", \"SCRAMBLED\"]\n\n data_dir = \"/storage/store/data/OpenNeuro/{}/\".format(dataset)\n source_dir = opj(data_dir, \"{}_R1.0.3\".format(dataset))\n\n runs = [\"{:0>2d}\".format(x) for x in range(1, n_runs)]\n derivatives_dir = opj(data_dir, \"fmriprep/\")\n out_dir = opj(\"/storage/store/derivatives\", dataset)\n\n datasets_infos = Bunch(t_r=t_r, subjects=subjects, task=task, runs=runs,\n conditions=conditions, source_dir=source_dir,\n derivatives_dir=derivatives_dir, out_dir=out_dir)\n\n return datasets_infos\n\n\n\"\"\"\ndataset = \"ds000117\"\nsub, run = datasets_infos.subjects[0], datasets_infos.runs[0]\n(isdir(opj(datasets_infos.source_dir, \"sub-{}\".format(sub), \"func\")) is False)\n(isdir(opj(datasets_infos.source_dir, \"ses-mri\", \"sub-{}\".format(sub), \"func\")) is True)\nopj(datasets_infos.source_dir, \"sub-{}\".format(sub), \"ses-mri\", \"func\")\n\n\"\"\"\n\n\ndef handle_non_bids_ds117(paradigm):\n if 'trial_type' not in paradigm:\n paradigm.rename(columns={\"stim_type\": \"trial_type\"}, inplace=True)\n return paradigm\n\n\ndef mocked_bids_fetcher(dataset):\n '''only intended for API design. Tested only for provided datasets (on drago)\n with fmriprep folder specification. 
It should be replaced by automatic\n bids fetching tool'''\n\n datasets_infos = demo_datasets(dataset)\n preprocessed_fmri, events, confounds = [], [], []\n for sub, run in itertools.product(datasets_infos.subjects,\n datasets_infos.runs):\n\n # ugly automatic handling of multimodal dataset to focus on fMRI\n if not isdir(opj(datasets_infos.source_dir, \"sub-{}\".format(sub), \"func\")) and isdir(opj(datasets_infos.source_dir, \"sub-{}\".format(sub), \"ses-mri\", \"func\")):\n file = opj(\"sub-{}\".format(sub), \"ses-mri\", \"func\",\n \"sub-{}_ses-mri_task-{}_run-{}\".format(sub,\n datasets_infos.task, run))\n else:\n file = opj(\"sub-{}\".format(sub), \"func\",\n \"sub-{}_task-{}_run-{}\".format(sub, datasets_infos.task, run))\n\n events.append(opj(datasets_infos.source_dir,\n \"{}_events.tsv\".format(file)))\n confounds.append(opj(datasets_infos.derivatives_dir,\n \"{}_bold_confounds.tsv\".format(file)))\n preprocessed_fmri.append(opj(datasets_infos.derivatives_dir,\n \"{}_bold_space-MNI152NLin2009cAsym_preproc.nii.gz\".format(file)))\n\n return datasets_infos, preprocessed_fmri, events, confounds\n\n\ndef read_clean_paradigm(event, trials_of_interest, trials_to_ignore,\n dropna=True, verbose=0):\n \"\"\" loads events file, keep what is interesting for the user\n # COMMENT Maybe drop nans in events automatically only if they are in\n # onset or trial_type columns.\n #\n # TO IMPLEMENT\n # - Filtering trials_to_ignore\n # - Trials merging, if trials of interest is a dictionary : {cond_1:\n # [trial_a, trial_b], cond_2: [trial_c]}, all scans\n # with labels in [trial_a, trial_b] are attributed label cond_1.\n # Then they are handled with respect to type_of_modeling\n # (separated_events, blocks..)\n \"\"\"\n paradigm = handle_non_bids_ds117(\n read_csv(event, delimiter='\\t'))\n if dropna:\n paradigm = paradigm.dropna().reset_index()\n paradigm[\"trial_type\"] = paradigm[\"trial_type\"].str.lower()\n paradigm['trial_type'] = paradigm[\"trial_type\"].str.split(' ')\n paradigm[\"trial_type\"] = paradigm[\"trial_type\"].str.join('_')\n if verbose:\n print(np.unique(paradigm.trial_type.values, return_counts=True))\n return paradigm\n\n\ndef _load_confounds(confound):\n \"\"\"load motion regressors from fmriprep confound.tsv file\n \"\"\"\n # could handle the case of other confounds (not fmriprep) or no confounds\n reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']\n regs = pd.read_csv(confound, delimiter='\\t').filter(\n [\"X\", \"Y\", \"Z\", \"RotX\", \"RotY\", \"RotZ\"]).fillna(0).to_numpy()\n return regs, reg_names\n\n\ndef make_block_paradigm(paradigm, type_of_modeling, time_between_blocks=10000):\n \"\"\"\n create a suitable paradigm structure to regress events block by block\n\n checks two conditions to separate blocks, to be in the same block two\n events must have the same trial_type and have onset less than\n time_between_blocks otherwise, group_counter is increased.\n Events whose type is not in events_of_interest are not grouped.\n \"\"\"\n\n paradigm['group_counter'] = ((np.abs(paradigm.onset.shift().fillna(\n 0) - paradigm.onset) > time_between_blocks) |\n (paradigm.trial_type.shift() != paradigm.trial_type)).cumsum()\n\n # TODO if conditions not of interest, they should keep their initial label\n paradigm['trial_type'] = paradigm.trial_type + \\\n \"_\" + paradigm.group_counter.map(str).str.zfill(2)\n\n return paradigm\n\n\ndef expand_trial_type_per_event(trial_type, per_event_type):\n \"\"\"\n trial_type is the \"trial_type\" column of events.tsv\n per_event_type is the instance 
\"x\" of trial_type\n\n expands all instances of 'x' into \"x_01\", \"x_02\"..\n \"\"\"\n expanded_trial_type = copy.deepcopy(trial_type)\n n_conds = (trial_type == per_event_type).sum()\n expanded_trial_type[trial_type == per_event_type] = ['%s_%0{}d'.format(\n len(str(n_conds))) % (per_event_type, i) for i in range(n_conds)]\n return expanded_trial_type\n\n\ndef make_event_paradigm(paradigm, events_of_interest):\n \"\"\"create a suitable paradigm structure to regress events trial by trial\n \"\"\"\n trial_type = paradigm[\"trial_type\"].values\n for event_type in events_of_interest:\n trial_type = expand_trial_type_per_event(trial_type, event_type)\n return paradigm\n\n\ndef fit_design_matrix(fmri, model, save_location, design_matrix, event,\n confound, type_of_modeling, trials_of_interest,\n trials_to_ignore, hrf_model, time_between_blocks,\n verbose):\n \"\"\" This functions create a suitable design matrix if not provided, fit it\n and save the output zmaps\n\n fmri : one run of one task for one subject (usually preprocessed)\n model : nilearn.stats.FirstLevelModel to fit to fmri using design_matrix\n design_matrix : matrix used to fit the model to fmri. If provided,\n all following arguments will be ignored : event, confound,\n type_of_modeling, trials_of_interest, trials_to_ignore, hrf_model\n event : event file corresponding to fmri\n confound : movement confounders to regress out (only fmriprep for now)\n type_of_modeling : string\n type_of_modeling can be in [\"event-related\", \"block-design\",\n \"mumford\", \"session\"]\n trials_of_interest : list of values of trial_type column to keep\n proposed feature if trials of interest is a\n dictionary {cond_1:[trial_a,trial_b], cond_2:[trial_c]} we could easily\n trials_to_ignore : list of values of trial_type column to filter out\n time_between_blocks : additional parameter for block-design\n Note : If some trial are not of interest nor to ignore, they will be\n regressed during GLM but then not used for decoding.\n Note 2 : By default all trial types are of interest, None to ignore\n \"\"\"\n\n if design_matrix is None:\n n_scans = nib.load(fmri).shape[3]\n frame_times = np.arange(n_scans) * model.t_r\n\n if trials_of_interest == \"all\":\n paradigm = handle_non_bids_ds117(\n read_csv(event, delimiter='\\t'))\n trials_of_interest = np.unique(paradigm.trial_type.dropna().values)\n trials_of_interest_ = ['_'.join(event.lower().split(\" \"))\n for event in trials_of_interest]\n\n paradigm = read_clean_paradigm(event, trials_of_interest_,\n trials_to_ignore, verbose=verbose)\n\n if type_of_modeling in [\"event-related\", \"mumford\"]:\n paradigm = make_event_paradigm(paradigm, trials_of_interest_)\n elif type_of_modeling == \"block-design\":\n paradigm = make_block_paradigm(\n paradigm, trials_of_interest_, time_between_blocks)\n elif type_of_modeling == \"session\":\n paradigm = paradigm\n\n regs, reg_names = _load_confounds(confound)\n\n design_matrix = make_first_level_design_matrix(\n frame_times, events=paradigm, hrf_model=hrf_model, add_regs=regs,\n add_reg_names=reg_names)\n else:\n trials_of_interest_ = trials_of_interest\n type_of_modeling = \"custom\"\n\n model.fit(fmri, design_matrices=[design_matrix])\n # TO IMPROVE, for saving purpose for now.\n run = fmri.split('/')[-1].split('_')[-4]\n sub = fmri.split('/')[-1].split('_')[0]\n\n filenames = []\n for trial in design_matrix.loc[:, design_matrix.columns.str.contains('|'.join(trials_of_interest_))].columns:\n image = model.compute_contrast(design_matrix.columns == trial)\n 
filename = os.path.join(save_location, \"{}_{}_{}_{}.nii.gz\".format(\n sub, run, trial, type_of_modeling))\n image.to_filename(filename)\n filenames.append(filename)\n sorted(filenames)\n return filenames\n\n\nclass InterSubjectPipelineGLMDecoding():\n def __init__(self, dataset, smoothing_fwhm=5, mask=None, high_pass=.01,\n type_of_modeling=\"event-r\", trials_of_interest=\"all\",\n trials_to_ignore=[], hrf_model=\"spm\", time_between_blocks=10000,\n decoder=Decoder(), n_jobs=1, verbose=0):\n '''\n type_of_modeling : string\n type_of_modeling can be in [\"event-related\", \"block-design\",\n \"mumford\", \"session\"]\n trials_of_interest : list of values of trial_type column to keep\n proposed feature if trials of interest is a\n dictionary {cond_1:[trial_a,trial_b], cond_2:[trial_c]}\n trial_a,trial_b are merged into cond_1 event types\n trials_to_ignore : list of values of trial_type column to filter out\n time_between_blocks : additional parameter for block-design,\n by default, too big to be used to separate blocks.\n '''\n dataset_infos, niimgs, events, confounds = mocked_bids_fetcher(\n dataset)\n self.dataset = dataset\n self.smoothing_fwhm = smoothing_fwhm\n self.mask = mask\n self.high_pass = high_pass\n self.type_of_modeling = type_of_modeling\n self.trials_of_interest = trials_of_interest\n self.trials_to_ignore = trials_to_ignore\n self.hrf_model = hrf_model\n self.t_r = dataset_infos.t_r\n self.subjects = dataset_infos.subjects\n self.task = dataset_infos.task\n self.runs = dataset_infos.runs,\n self.conditions = dataset_infos.conditions\n self.source_dir = dataset_infos.source_dir\n self.derivatives_dir = dataset_infos.derivatives_dir\n self.out_dir = dataset_infos.out_dir\n self.niimgs = niimgs\n self.events = events\n self.confounds = confounds\n self.model = FirstLevelModel(mask=self.mask,\n smoothing_fwhm=self.smoothing_fwhm,\n high_pass=self.high_pass, t_r=self.t_r)\n self.decoder = decoder\n self.time_between_blocks = time_between_blocks\n self.n_jobs = n_jobs\n self.verbose = verbose\n\n def fit(self, design_matrices=None):\n\n # To add : inputting list of design_matrix with check that it has the\n # right length. 
Not implementing for now\n\n # To add mumford : add an addition loop level\n '''for trial_of_interest in trials_of_interest:\n fit_design_matrix(trials_of_interest=[trial_of_interest])\n '''\n\n filenames = Parallel(n_jobs=self.n_jobs)(delayed(\n fit_design_matrix)\n (fmri, self.model, self.out_dir, None, event, confound,\n self.type_of_modeling, self.trials_of_interest,\n self.trials_to_ignore, self.hrf_model, self.time_between_blocks,\n self.verbose)\n for fmri, event, confound in zip(self.niimgs,\n self.events, self.confounds))\n\n self.glm_files = np.hstack(filenames)\n\n file_infos = [file.split('/')[-1].split('_')\n for file in self.glm_files]\n\n self.glm_files_subs = np.asarray([file[0] for file in file_infos])\n self.glm_files_runs = np.asarray([file[1] for file in file_infos])\n self.glm_files_labels = np.asarray([file[2] for file in file_infos])\n\n self.decoder.fit(self.glm_files, self.glm_files_labels,\n groups=self.glm_files_subs)\n\n\n# Quick to run even with n_jobs=1, example on ds107\ndataset_infos = demo_datasets(\"ds000107\")\nmask_ds107 = opj(dataset_infos.out_dir, \"resampled_MNI_mask_gm.nii.gz\")\npipeline = InterSubjectPipelineGLMDecoding(dataset=\"ds000107\", smoothing_fwhm=5,\n mask=mask_ds107, high_pass=.01,\n type_of_modeling=\"block-design\",\n time_between_blocks=10000,\n decoder=Decoder(), n_jobs=1)\npipeline.fit()\nprint(pipeline.decoder.cv_scores_)\n\n\n# Example, quick with n_jobs=10\n\ndataset_infos = demo_datasets(\"ds000117\")\nmask_ds117 = opj(dataset_infos.out_dir, \"resampled_MNI_mask_gm.nii.gz\")\npipeline = InterSubjectPipelineGLMDecoding(dataset=\"ds000117\", smoothing_fwhm=5,\n mask=mask_ds117, high_pass=.01,\n type_of_modeling=\"event-related\",\n decoder=Decoder(), n_jobs=10)\npipeline.fit()\nprint(pipeline.decoder.cv_scores_)\n\n# Example on Haxby dataset quick with n_jobs=10\n\ndataset_infos = demo_datasets(\"ds000105\")\nmask_ds105 = opj(dataset_infos.out_dir, \"resampled_MNI_mask_gm.nii.gz\")\npipeline = InterSubjectPipelineGLMDecoding(dataset=\"ds000105\", smoothing_fwhm=5,\n mask=mask_ds105, high_pass=.01,\n type_of_modeling=\"event-related\",\n trials_of_interest=['scissors', 'face', 'cat', 'shoe',\n 'house', 'scrambledpix', 'bottle', 'chair'],\n decoder=Decoder(), n_jobs=10)\npipeline.fit()\nprint(pipeline.decoder.cv_scores_)\n" ]
[ [ "numpy.hstack", "pandas.read_csv", "sklearn.utils.Bunch", "numpy.unique", "numpy.asarray", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
datalayer-contrib/jupyter-sparkmagic
[ "2e4c2a2aa153e32a6428de863bd829ec03844609" ]
[ "sparkmagic/sparkmagic/utils/utils.py" ]
[ "# Distributed under the terms of the Modified BSD License.\nfrom IPython.core.error import UsageError\nfrom IPython.core.magic_arguments import parse_argstring\nimport numpy as np\nimport pandas as pd\nimport json\nimport importlib\nfrom collections import OrderedDict\n\nimport sparkmagic.utils.configuration as conf\nimport sparkmagic.utils.constants as constants\nfrom sparkmagic.livyclientlib.exceptions import (\n BadUserDataException,\n DataFrameParseException,\n BadUserConfigurationException,\n)\n\n\ndef get_coerce_value(coerce):\n if coerce is not None:\n coerce = coerce.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\")\n return coerce\n\n\ndef parse_argstring_or_throw(magic_func, argstring, parse_argstring=parse_argstring):\n \"\"\"An alternative to the parse_argstring method from IPython.core.magic_arguments.\n Catches IPython.core.error.UsageError and propagates it as a\n livyclientlib.exceptions.BadUserDataException.\"\"\"\n try:\n return parse_argstring(magic_func, argstring)\n except UsageError as e:\n raise BadUserDataException(str(e))\n\n\ndef coerce_pandas_df_to_numeric_datetime(df):\n for column_name in df.columns:\n coerced = False\n\n if df[column_name].isnull().all():\n continue\n\n if not coerced and df[column_name].dtype == np.dtype(\"object\"):\n try:\n df[column_name] = pd.to_datetime(df[column_name], errors=\"raise\")\n coerced = True\n except (ValueError, TypeError, OverflowError):\n pass\n\n if not coerced and df[column_name].dtype == np.dtype(\"object\"):\n try:\n df[column_name] = pd.to_numeric(df[column_name], errors=\"raise\")\n coerced = True\n except (ValueError, TypeError):\n pass\n\n\ndef records_to_dataframe(records_text, kind, coerce=None):\n if records_text in [\"\", \"[]\"]:\n strings = []\n else:\n strings = records_text.strip().split(\"\\n\")\n try:\n data_array = [\n json.JSONDecoder(object_pairs_hook=OrderedDict).decode(s) for s in strings\n ]\n\n if kind == constants.SESSION_KIND_SPARKR and len(data_array) > 0:\n data_array = data_array[0]\n\n df = pd.DataFrame(data_array)\n\n if len(data_array) > 0:\n # This will assign the columns in the right order. 
If we simply did\n # df = pd.DataFrame(data_array, columns=data_array[0].keys())\n # in the code defining df, above, we could get an issue where the first element\n # has some columns as null, and thus would drop the columns from the df altogether.\n # Refer to https://github.com/jupyter-incubator/sparkmagic/issues/346 for\n # more details.\n for data in data_array:\n if len(data.keys()) == len(df.columns):\n df = df[list(data.keys())]\n break\n\n if coerce is None:\n coerce = conf.coerce_dataframe()\n if coerce:\n coerce_pandas_df_to_numeric_datetime(df)\n\n return df\n except ValueError:\n raise DataFrameParseException(\n \"Cannot parse object as JSON: '{}'\".format(strings)\n )\n\n\ndef get_sessions_info_html(info_sessions, current_session_id):\n html = (\n \"\"\"<table>\n<tr><th>ID</th><th>YARN Application ID</th><th>Kind</th><th>State</th><th>Spark UI</th><th>Driver log</th><th>User</th><th>Current session?</th></tr>\"\"\"\n + \"\".join(\n [session.get_row_html(current_session_id) for session in info_sessions]\n )\n + \"</table>\"\n )\n\n return html\n\n\ndef initialize_auth(args):\n \"\"\"Creates an authenticatior class instance for the given auth type\n\n Args:\n args (IPython.core.magics.namespace): The namespace object that is created from\n parsing %spark magic command\n\n Returns:\n An instance of a valid Authenticator or None if args.auth is 'None'\n\n Raises:\n sparkmagic.livyclientlib.BadUserConfigurationException: if args.auth is not a valid\n authenticator class.\n \"\"\"\n if args.auth is None:\n auth = conf.get_auth_value(args.user, args.password)\n else:\n auth = args.auth\n if auth == constants.NO_AUTH:\n return None\n else:\n full_class = conf.authenticators().get(auth)\n if full_class is None:\n raise BadUserConfigurationException(\"Auth '{}' not supported\".format(auth))\n module, class_name = (full_class).rsplit(\".\", 1)\n events_handler_module = importlib.import_module(module)\n auth_class = getattr(events_handler_module, class_name)\n return auth_class(args)\n\n\nclass Namespace:\n \"\"\"Namespace to initialize authenticator class with\"\"\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n" ]
[ [ "pandas.to_numeric", "numpy.dtype", "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
jhrmnn/schnetpack
[ "2f96dee7d184b8db8ee610d6743570daeb3763b9" ]
[ "src/schnetpack/sacred/calculator_ingredients.py" ]
[ "from sacred import Ingredient\nimport os\nimport torch\n\nfrom schnetpack.md.calculators import SchnetPackCalculator\nfrom schnetpack.md.utils import MDUnits\n\ncalculator_ingradient = Ingredient('calculator')\n\n\n@calculator_ingradient.config\ndef config():\n \"\"\"configuration for the calculator ingredient\"\"\"\n calculator = 'schnet_calculator'\n required_properties = ['y', 'dydx']\n force_handle = 'dydx'\n position_conversion = 1.0 / MDUnits.angs2bohr\n force_conversion = 1.0 / MDUnits.auforces2aseforces\n property_conversion = {}\n\n model_path = 'eth_ens_01.model'\n # If model is a directory, search for best_model file\n if os.path.isdir(model_path):\n model_path = os.path.join(model_path, 'best_model')\n\n\n@calculator_ingradient.capture\ndef load_model(_log, model_path, device):\n _log.info('Loaded model from {:s}'.format(model_path))\n model = torch.load(model_path).to(device)\n return model\n\n\n@calculator_ingradient.capture\ndef build_calculator(_log, required_properties, force_handle,\n position_conversion, force_conversion,\n property_conversion, calculator, device):\n \"\"\"\n Build the calculator object from the provided settings.\n\n Args:\n model (torch.nn.module): the model which is used for property calculation\n required_properties (list): list of properties that are calculated by the model\n force_handle (str): name of the forces property in the model output\n position_conversion (float): conversion factor for positions\n force_conversion (float): conversion factor for forces\n property_conversion (dict): dictionary with conversion factors for other properties\n calculator (src.schnetpack.md.calculator.Calculator): calculator object\n\n Returns:\n the calculator object\n \"\"\"\n _log.info(f'Using {calculator}')\n\n if calculator == 'schnet_calculator':\n\n model = load_model(device=device)\n return SchnetPackCalculator(model,\n required_properties=required_properties,\n force_handle=force_handle,\n position_conversion=position_conversion,\n force_conversion=force_conversion,\n property_conversion=property_conversion)\n else:\n raise NotImplementedError\n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Toni-Chan/IBM-TableRecognition
[ "34eb433184e07d74b060fbb4c14c110fd284c0dd" ]
[ "Chinese-OCR/ctpn/lib/fast_rcnn/config.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Fast R-CNN config system.\nThis file specifies default config options for Fast R-CNN. You should not\nchange values in this file. Instead, you should write a config file (in yaml)\nand use cfg_from_file(yaml_file) to load it and override the default options.\nMost tools in $ROOT/tools take a --cfg option to specify an override file.\n - See tools/{train,test}_net.py for example code that uses cfg_from_file()\n - See experiments/cfgs/*.yml for example YAML config override files\n\"\"\"\n\nimport os\nimport os.path as osp\nimport numpy as np\nfrom time import strftime, localtime\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Training options\n#\n\n# region proposal network (RPN) or not\n__C.IS_RPN = True\n__C.ANCHOR_SCALES = [16]\n__C.NCLASSES = 2\n\n# multiscale training and testing\n__C.IS_MULTISCALE = False\n__C.IS_EXTRAPOLATING = True\n\n__C.REGION_PROPOSAL = 'RPN'\n\n__C.NET_NAME = 'VGGnet'\n__C.SUBCLS_NAME = 'voxel_exemplars'\n\n__C.TRAIN = edict()\n# Adam, Momentum, RMS\n__C.TRAIN.SOLVER = 'Momentum'\n# learning rate\n__C.TRAIN.WEIGHT_DECAY = 0.0005\n__C.TRAIN.LEARNING_RATE = 0.001\n__C.TRAIN.MOMENTUM = 0.9\n__C.TRAIN.GAMMA = 0.1\n__C.TRAIN.STEPSIZE = 50000\n__C.TRAIN.DISPLAY = 10\n__C.TRAIN.LOG_IMAGE_ITERS = 100\n__C.TRAIN.OHEM = False\n__C.TRAIN.RANDOM_DOWNSAMPLE = False\n\n# Scales to compute real features\n__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)\n# __C.TRAIN.SCALES_BASE = (1.0,)\n\n# parameters for ROI generating\n#__C.TRAIN.SPATIAL_SCALE = 0.0625\n__C.TRAIN.KERNEL_SIZE = 5\n\n# Aspect ratio to use during training\n# __C.TRAIN.ASPECTS = (1, 0.75, 0.5, 0.25)\n__C.TRAIN.ASPECTS= (1,)\n\n\n# Scales to use during training (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 2\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 128\n\n# Fraction of minibatch that is labeled foreground (i.e. 
class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.1\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 5000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_PREFIX = 'VGGnet_fast_rcnn'\n__C.TRAIN.SNAPSHOT_INFIX = ''\n\n# Use a prefetch thread in roi_data_layer.layer\n# So far I haven't found this useful; likely more engineering work is required\n__C.TRAIN.USE_PREFETCH = False\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n# Deprecated (inside weights)\n# used for assigning weights for each coords (x1, y1, w, h)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n# faster rcnn dont use pre-generated rois by selective search\n# __C.TRAIN.BBOX_NORMALIZE_STDS = (1, 1, 1, 1)\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'selective_search'\n\n# Make minibatches from images that have similar aspect ratios (i.e. 
both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n__C.TRAIN.ASPECT_GROUPING = True\n# preclude rois intersected with dontcare areas above the value\n__C.TRAIN.DONTCARE_AREA_INTERSECTION_HI = 0.5\n__C.TRAIN.PRECLUDE_HARD_SAMPLES = True\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = True\n# IOU >= thresh: positive example\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n# IOU < thresh: negative example\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n# If an anchor statisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 256\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TRAIN.RPN_MIN_SIZE = 8\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n# __C.TRAIN.RPN_POSITIVE_WEIGHT = 0.5\n\n\n#\n# Testing options\n#\n\n__C.TEST = edict()\n\n# Scales to use during testing (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (900,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1500\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = True\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'selective_search'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n## Number of top scoring boxes to keep before apply NMS to RPN proposals\n#__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n__C.TEST.RPN_PRE_NMS_TOP_N = 12000\n## Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 1000\n#__C.TEST.RPN_POST_NMS_TOP_N = 2000\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TEST.RPN_MIN_SIZE = 8\n\n\n#\n# MISC\n#\n\n# The mapping from image coordinates to feature map coordinates might cause\n# some boxes that are distinct in image space to become identical in feature\n# coordinates. 
If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor\n# for identifying duplicate boxes.\n# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16\n__C.DEDUP_BOXES = 1./16.\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n#__C.RNG_SEED = 3\n__C.RNG_SEED = 3\n\n# A small number that's used many times\n__C.EPS = 1e-14\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Model directory\n__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n__C.LOG_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Default GPU device id\n__C.GPU_ID = 0\n\ndef get_output_dir(imdb, weights_filename):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if weights_filename is not None:\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\ndef get_log_dir(imdb):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n log_dir = osp.abspath(\\\n osp.join(__C.ROOT_DIR, 'logs', __C.LOG_DIR, imdb.name, strftime(\"%Y-%m-%d-%H-%M-%S\", localtime())))\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n return log_dir\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.iteritems():\n # a must specify keys that are in b\n if not b.has_key(k):\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. 
{}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print('Error under config key: {}'.format(k))\n raise\n else:\n b[k] = v\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert d.has_key(subkey)\n d = d[subkey]\n subkey = key_list[-1]\n assert d.has_key(subkey)\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SalahEddineLahniche/MLC-Kaggle-2017
[ "489b76182227cbf51812c051381da4e58098d338" ]
[ "ABONO/Regressor.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn import linear_model, ensemble, svm, neural_network, neighbors\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport pickle\nimport xgboost \n\n\nclass Regressor:\n def __init__(self, session, train_df, test_df=None, dcols=None, model=None, **kwargs):\n self.df = train_df\n self.session = session\n self.tdf = test_df\n self.dcols = dcols if dcols else []\n if type(model) == type('') or not model:\n if (model == \"linear\") or model == \"l\":\n self.model = linear_model.LinearRegression(**kwargs)\n elif (model == \"random_forst\") or model == \"rf\":\n self.model = ensemble.RandomForestRegressor(**kwargs)\n elif model == \"svr\":\n self.model = svm.SVR(**kwargs)\n elif model == \"nn\":\n self.model = neural_network.MLPRegressor(**kwargs)\n elif model == \"knn\":\n self.model = neighbors.KNeighborsRegressor(**kwargs)\n elif model == \"lasso\":\n self.model = linear_model.Lasso(**kwargs)\n elif model == \"en\":\n self.model = linear_model.ElasticNet(**kwargs)\n elif model == \"xgb\":\n self.model = xgboost.XGBRegressor(**kwargs)\n elif model == 'gb':\n params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,\n 'learning_rate': 0.01, 'loss': 'ls'}\n self.model = ensemble.GradientBoostingRegressor(**params)\n self.fit = True\n else:\n self.fit = False\n self.model = model\n\n \n def cross_validate(self, length=None, test_size=0.2, fit=True):\n X = self.df.drop(labels=(['power_increase'] + self.dcols), axis=1)[:length].as_matrix()\n y = self.df['power_increase'][:length].as_matrix()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=0)\n if fit and self.fit:\n self.model.fit(X_train, y_train)\n pickle.dump(self.model, self.session.modelf)\n self.fit = False\n y_pred = self.model.predict(X_test)\n return mean_squared_error(y_test, y_pred)\n\n \n def predict(self):\n X = self.df.drop(labels=(['power_increase'] + self.dcols), axis=1).as_matrix()\n y = self.df['power_increase'].as_matrix()\n rX = self.tdf.drop(self.dcols, axis=1).as_matrix()\n if self.fit:\n self.model.fit(X, y)\n pickle.dump(self.model, self.session.modelf)\n y_pred = self.model.predict(rX)\n return y_pred\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.linear_model.ElasticNet", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error", "sklearn.svm.SVR", "sklearn.neighbors.KNeighborsRegressor", "sklearn.linear_model.Lasso", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.linear_model.LinearRegression", "sklearn.neural_network.MLPRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BrigitaPetk/support_department_dashboard
[ "309e8b3e879a7420a0133596538feecf35813535" ]
[ "data_processing/base_creation.py" ]
[ "import pandas as pd\nfrom pathlib import Path\nimport os\n\n\nclass BaseCreator():\n def creator(self, filter_direktorija, final_direktorija, filter_direktorija_GB, final_direktorija_GB):\n\n def base_creator(location, name):\n filter_files = list(location.glob(\"*.csv*\"))\n if not filter_files:\n empty_data_base = {'TecReq#': [], 'Year':[],'Month':[], 'Week':[], 'Created+3mod':[], 'First Lock+3mod':[], 'FirstResponse+3mod':[], 'Close Time+3mod':[], \n 'First Response - Created':[], 'First Lock - Created':[], 'Queue':[], 'Owner Country Code':[], 'State':[], 'Number of Articles':[], \n 'Needed knowledge level':[], 'Case CV':[]}\n df = pd.DataFrame(empty_data_base)\n df.to_csv(f'{location}/{name}.csv', sep=';', index=False)\n print(\"base was created\")\n else: \n print(\"not empty folder\")\n\n\n def filtration_by_week_creator(location, name):\n final_files = list(location.glob(\"*.csv*\"))\n if not final_files:\n empty_data_base = {'Year':[], 'Month':[], 'Week': [],'request number': [],'request number in LT': [], \n 'request number in AT':[] , 'request number in CH':[] ,\n 'request number in DE':[] , 'request number in IN':[] ,\n 'IMP request number': [], 'SMART request number':[], 'STRAT request number': [],\n 'EMPTY request number': [], 'LT request number in DE':[],\n 'LT request number in CH':[], 'LT request number in AT':[], 'LT CTS': [],\n 'knowledge level empty': [], 'knowledge level 1-2': [], 'knowledge level 3-4': [],\n 'STRAT solution time': [], 'IMP solution time': [], 'SMART solution time': [],\n 'EMPTY solution time': [], 'Intake': [] }\n df = pd.DataFrame(empty_data_base)\n df.to_csv(f'{location}/{name}.csv', sep=',', index=False)\n print(\"final data base was created\")\n else: \n print(\"not empty folder\")\n\n def filtration_by_week_creator_GB(location, name):\n final_files = list(location.glob(\"*.csv*\"))\n if not final_files:\n empty_data_base = {'Year':[], 'Month':[], 'Week': [],'request number': [],'request number in LT': [], \n 'request number in GB':[] , 'request number in IE':[], 'request number in IN':[] ,\n 'IMP request number': [], 'SMART request number':[], 'STRAT request number': [],\n 'EMPTY request number': [], 'LT request number in GB':[],\n 'LT request number in IE':[], 'LT CTS': [],\n 'knowledge level empty': [], 'knowledge level 1-2': [], 'knowledge level 3-4': [],\n 'STRAT solution time': [], 'IMP solution time': [], 'SMART solution time': [],\n 'EMPTY solution time': [], 'Intake': [] }\n df = pd.DataFrame(empty_data_base)\n df.to_csv(f'{final_direktorija_GB}/{name}.csv', sep=',', index=False)\n print(\"final data base was created\")\n else: \n print(\"not empty folder\")\n\n def base_by_month_creator(location, name):\n filter_files = list(location.glob(\"*.csv*\"))\n file_names = [Path(file).name for file in filter_files]\n for file_name in file_names:\n if file_name != f\"{name}.csv\":\n data = {'Month':[], 'Request number': [], 'Request number LT': []}\n df = pd.DataFrame(data)\n df.to_csv(f'{location}/{name}.csv', sep=',', index=False)\n print(\"base by month was created\")\n else: \n print(\"month file already exists\")\n\n\n\n base_creator(filter_direktorija, 'base')\n base_creator(filter_direktorija_GB, 'base_GB')\n filtration_by_week_creator(final_direktorija, 'base_by_week')\n filtration_by_week_creator_GB(final_direktorija_GB, 'base_by_week')\n base_by_month_creator(final_direktorija, \"base_by_month\")\n base_by_month_creator(final_direktorija_GB, \"base_by_month_GB\")\n " ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mikanikos/MachineLearning_Project1
[ "994d9f09a8ffc7ee6379c918bfdc69c22ad38509" ]
[ "proj1_helpers.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"some helper functions for project 1.\"\"\"\nimport csv\nimport numpy as np\n\n\ndef load_csv_data(data_path, sub_sample=False):\n \"\"\"Loads data and returns y (class labels), tX (features) and ids (event ids)\"\"\"\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y=='b')] = -1\n \n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids\n\n\ndef predict_labels(weights, data):\n \"\"\"Generates class predictions given weights, and a test data matrix\"\"\"\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return y_pred\n\n\ndef create_csv_submission(ids, y_pred, name):\n \"\"\"\n Creates an output file in csv format for submission to kaggle\n Arguments: ids (event ids associated with each prediction)\n y_pred (predicted class labels)\n name (string name of .csv output file to be created)\n \"\"\"\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})\n" ]
[ [ "numpy.dot", "numpy.where", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gaurav274/Eva
[ "b9d5bc8d0d46aba811e4da0157d61614be06f1d1" ]
[ "test/util.py" ]
[ "# coding=utf-8\n# Copyright 2018-2020 EVA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport os\n\nfrom src.models.storage.batch import Batch\nfrom src.parser.parser import Parser\nfrom src.optimizer.statement_to_opr_convertor import StatementToPlanConvertor\nfrom src.optimizer.plan_generator import PlanGenerator\nfrom src.executor.plan_executor import PlanExecutor\nfrom src.models.catalog.frame_info import FrameInfo\nfrom src.models.catalog.properties import ColorSpace\nfrom src.udfs.abstract_udfs import AbstractClassifierUDF\n\nNUM_FRAMES = 10\n\n\ndef create_dataframe(num_frames=1) -> pd.DataFrame:\n frames = []\n for i in range(1, num_frames + 1):\n frames.append({\"id\": i, \"data\": (i * np.ones((1, 1)))})\n return pd.DataFrame(frames)\n\n\ndef create_dataframe_same(times=1):\n base_df = create_dataframe()\n for i in range(1, times):\n base_df = base_df.append(create_dataframe())\n\n return base_df\n\n\ndef custom_list_of_dicts_equal(one, two):\n for v1, v2 in zip(one, two):\n if v1.keys() != v2.keys():\n return False\n for key in v1.keys():\n if isinstance(v1[key], np.ndarray):\n if not np.array_equal(v1[key], v2[key]):\n return False\n\n else:\n if v1[key] != v2[key]:\n return False\n\n return True\n\n\ndef create_sample_video(num_frames=NUM_FRAMES):\n try:\n os.remove('dummy.avi')\n except FileNotFoundError:\n pass\n\n out = cv2.VideoWriter('dummy.avi',\n cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,\n (2, 2))\n for i in range(num_frames):\n frame = np.array(np.ones((2, 2, 3)) * 0.1 * float(i + 1) * 255,\n dtype=np.uint8)\n out.write(frame)\n\n\ndef create_dummy_batches(num_frames=NUM_FRAMES,\n filters=[], batch_size=10, start_id=0):\n if not filters:\n filters = range(num_frames)\n data = []\n for i in filters:\n data.append({'id': i + start_id,\n 'data': np.array(\n np.ones((2, 2, 3)) * 0.1 * float(i + 1) * 255,\n dtype=np.uint8)})\n\n if len(data) % batch_size == 0:\n yield Batch(pd.DataFrame(data))\n data = []\n if data:\n yield Batch(pd.DataFrame(data))\n\n\ndef perform_query(query):\n stmt = Parser().parse(query)[0]\n l_plan = StatementToPlanConvertor().visit(stmt)\n p_plan = PlanGenerator().build(l_plan)\n return PlanExecutor(p_plan).execute_plan()\n\n\nclass DummyObjectDetector(AbstractClassifierUDF):\n\n @property\n def name(self) -> str:\n return \"dummyObjectDetector\"\n\n def __init__(self):\n super().__init__()\n\n @property\n def input_format(self):\n return FrameInfo(-1, -1, 3, ColorSpace.RGB)\n\n @property\n def labels(self):\n return ['__background__', 'person', 'bicycle']\n\n def classify(self, frames: pd.DataFrame):\n # odd are labeled bicycle and even person\n labels = [self.labels[i % 2 + 1] for i in range(len(frames))]\n prediction_df_list = pd.DataFrame({'label': labels})\n return prediction_df_list\n" ]
[ [ "numpy.array_equal", "pandas.DataFrame", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
neevparikh/hierarchical-doom
[ "082f794b9c6101c4e94f15bf4f93c718ee219ea5" ]
[ "algorithms/appooc/shared_buffers.py" ]
[ "import math\n\nimport numpy as np\nimport torch\nfrom gym import spaces\n\nfrom algorithms.appooc.appooc_utils import copy_dict_structure, iter_dicts_recursively, iterate_recursively\nfrom algorithms.appo_common.model_utils import get_hidden_size\nfrom algorithms.utils.action_distributions import calc_num_logits, calc_num_actions\nfrom utils.utils import log\n\n\ndef to_torch_dtype(numpy_dtype):\n \"\"\"from_numpy automatically infers type, so we leverage that.\"\"\"\n x = np.zeros([1], dtype=numpy_dtype)\n t = torch.from_numpy(x)\n return t.dtype\n\n\ndef to_numpy(t, num_dimensions):\n arr_shape = t.shape[:num_dimensions]\n arr = np.ndarray(arr_shape, dtype=object)\n to_numpy_func(t, arr)\n return arr\n\n\ndef to_numpy_func(t, arr):\n if len(arr.shape) == 1:\n for i in range(t.shape[0]):\n arr[i] = t[i]\n else:\n for i in range(t.shape[0]):\n to_numpy_func(t[i], arr[i])\n\n\ndef ensure_memory_shared(*tensors):\n \"\"\"To prevent programming errors, ensure all tensors are in shared memory.\"\"\"\n for tensor_dict in tensors:\n for _, _, t in iterate_recursively(tensor_dict):\n assert t.is_shared()\n\n\nclass PolicyOutput:\n def __init__(self, name, size):\n self.name = name\n self.size = size\n\n def __repr__(self):\n return repr((self.name, self.size))\n\n\nclass SharedBuffers:\n def __init__(self, cfg, num_agents, obs_space, action_space):\n self.cfg = cfg\n self.num_agents = num_agents\n self.envs_per_split = cfg.num_envs_per_worker // cfg.worker_num_splits\n self.num_traj_buffers = self.calc_num_trajectory_buffers()\n\n num_actions = calc_num_actions(action_space)\n num_action_logits = calc_num_logits(action_space)\n self.num_actions = num_actions\n self.num_action_logits = num_action_logits\n\n hidden_size = get_hidden_size(self.cfg)\n\n log.debug('Allocating shared memory for trajectories')\n self.tensors = TensorDict()\n\n # policy inputs\n obs_dict = TensorDict()\n self.tensors['obs'] = obs_dict\n if isinstance(obs_space, spaces.Dict):\n for space_name, space in obs_space.spaces.items():\n obs_dict[space_name] = self.init_tensor(space.dtype, space.shape)\n else:\n raise Exception('Only Dict observations spaces are supported')\n\n # env outputs\n self.tensors['rewards'] = self.init_tensor(torch.float32, [1])\n self.tensors['dones'] = self.init_tensor(torch.bool, [1])\n\n # policy outputs\n policy_outputs = [\n ('action_logits', num_action_logits * cfg.num_options),\n ('actions', num_actions * cfg.num_options),\n ('log_prob_actions', 1 * cfg.num_options),\n ('option_idx', 1),\n ('policy_version', 1),\n ('rnn_states', hidden_size),\n ('termination_mask', cfg.num_options),\n ('termination_prob', cfg.num_options),\n ('values', cfg.num_options),\n ]\n\n policy_outputs = [PolicyOutput(*po) for po in policy_outputs]\n policy_outputs = sorted(policy_outputs, key=lambda policy_output: policy_output.name)\n self.policy_outputs = policy_outputs\n\n for po in policy_outputs:\n self.tensors[po.name] = self.init_tensor(torch.float32, [po.size])\n\n ensure_memory_shared(self.tensors)\n\n # this is for performance optimization\n # indexing in numpy arrays is faster than in PyTorch tensors\n self.tensors_individual_transitions = self.tensor_dict_to_numpy(\n len(self.tensor_dimensions()))\n self.tensor_trajectories = self.tensor_dict_to_numpy(len(self.tensor_dimensions()) - 1)\n\n # create a shared tensor to indicate when the learner is done with the trajectory buffer and\n # it can be used to store the next trajectory\n traj_buffer_available_shape = [\n self.cfg.num_workers,\n 
self.cfg.worker_num_splits,\n self.envs_per_split,\n self.num_agents,\n self.num_traj_buffers,\n ]\n self.is_traj_tensor_available = torch.ones(traj_buffer_available_shape, dtype=torch.uint8)\n self.is_traj_tensor_available.share_memory_()\n self.is_traj_tensor_available = to_numpy(self.is_traj_tensor_available, 2)\n\n # copying small policy outputs (e.g. individual value predictions & action logits) to shared memory is a\n # bottleneck on the policy worker. For optimization purposes we create additional tensors to hold\n # just concatenated policy outputs. Rollout workers parse the data and add it to the trajectory buffers\n # in a proper format\n policy_outputs_combined_size = sum(po.size for po in policy_outputs)\n policy_outputs_shape = [\n self.cfg.num_workers,\n self.cfg.worker_num_splits,\n self.envs_per_split,\n self.num_agents,\n policy_outputs_combined_size,\n ]\n\n self.policy_outputs = policy_outputs\n self.policy_output_tensors = torch.zeros(policy_outputs_shape, dtype=torch.float32)\n self.policy_output_tensors.share_memory_()\n self.policy_output_tensors = to_numpy(self.policy_output_tensors, 4)\n\n self.policy_versions = torch.zeros([self.cfg.num_policies], dtype=torch.int32)\n self.policy_versions.share_memory_()\n\n # a list of boolean flags to be shared among components that indicate that experience collection should be\n # temporarily stopped (e.g. due to too much experience accumulated on the learner)\n self.stop_experience_collection = torch.ones([self.cfg.num_policies], dtype=torch.bool)\n self.stop_experience_collection.share_memory_()\n\n def calc_num_trajectory_buffers(self):\n # calculate how many buffers are required per env runner to collect one \"macro batch\" for training\n # once macro batch is collected, all buffers will be released\n # we could have just copied the tensors on the learner to avoid this complicated logic, but it's better for\n # performance to keep data in shared buffers until they're needed\n samples_per_iteration = self.cfg.num_batches_per_iteration * self.cfg.batch_size * self.cfg.num_policies\n num_traj_buffers = samples_per_iteration / (self.cfg.num_workers *\n self.cfg.num_envs_per_worker * self.num_agents *\n self.cfg.rollout)\n\n # make sure we definitely have enough buffers to actually never wait\n # usually it'll be just two buffers and we swap back and forth\n num_traj_buffers *= 3\n\n # make sure we have at least two to swap between so we never actually have to wait\n num_traj_buffers = math.ceil(max(num_traj_buffers, self.cfg.min_traj_buffers_per_worker))\n log.info('Using %d sets of trajectory buffers', num_traj_buffers)\n return num_traj_buffers\n\n def init_tensor(self, tensor_type, tensor_shape):\n if not isinstance(tensor_type, torch.dtype):\n tensor_type = to_torch_dtype(tensor_type)\n\n dimensions = self.tensor_dimensions()\n final_shape = dimensions + list(tensor_shape)\n t = torch.zeros(final_shape, dtype=tensor_type)\n t.share_memory_()\n return t\n\n def tensor_dimensions(self):\n dimensions = [\n self.cfg.num_workers,\n self.cfg.worker_num_splits,\n self.envs_per_split,\n self.num_agents,\n self.num_traj_buffers,\n self.cfg.rollout,\n ]\n return dimensions\n\n def tensor_dict_to_numpy(self, num_dimensions):\n numpy_dict = copy_dict_structure(self.tensors)\n for d1, d2, key, curr_t, value2 in iter_dicts_recursively(self.tensors, numpy_dict):\n assert isinstance(curr_t, torch.Tensor)\n assert value2 is None\n d2[key] = to_numpy(curr_t, num_dimensions)\n assert isinstance(d2[key], np.ndarray)\n return 
numpy_dict\n\n\nclass TensorDict(dict):\n def index(self, indices):\n return self.index_func(self, indices)\n\n def index_func(self, x, indices):\n if isinstance(x, (dict, TensorDict)):\n res = TensorDict()\n for key, value in x.items():\n res[key] = self.index_func(value, indices)\n return res\n else:\n t = x[indices]\n return t\n\n def set_data(self, index, new_data):\n self.set_data_func(self, index, new_data)\n\n def set_data_func(self, x, index, new_data):\n if isinstance(new_data, (dict, TensorDict)):\n for new_data_key, new_data_value in new_data.items():\n self.set_data_func(x[new_data_key], index, new_data_value)\n elif isinstance(new_data, torch.Tensor):\n x[index].copy_(new_data)\n elif isinstance(new_data, np.ndarray):\n t = torch.from_numpy(new_data)\n x[index].copy_(t)\n else:\n raise Exception(f'Type {type(new_data)} not supported in set_data_func')\n" ]
[ [ "torch.ones", "torch.zeros", "torch.from_numpy", "numpy.ndarray", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
miaojinshuai/rl-attack
[ "0b406d6b0dc77189a3f32be3e766f7dc915dbc2a" ]
[ "train.py" ]
[ "import argparse\nimport gym\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport tempfile\nimport time\nimport json\nimport random\n\nimport rlattack.common.tf_util as U\n\nfrom rlattack import logger\nfrom rlattack import deepq\nfrom rlattack.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\nfrom rlattack.common.misc_util import (\n boolean_flag,\n pickle_load,\n pretty_eta,\n relatively_safe_pickle_dump,\n set_global_seeds,\n RunningAvg,\n SimpleMonitor\n)\nfrom rlattack.common.schedules import LinearSchedule, PiecewiseSchedule\n# when updating this to non-deperecated ones, it is important to\n# copy over LazyFrames\nfrom rlattack.common.atari_wrappers_deprecated import wrap_dqn\nfrom rlattack.common.azure_utils import Container\nfrom model import model, dueling_model\nfrom statistics import statistics\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"DQN experiments for Atari games\")\n # Environment\n parser.add_argument(\"--env\", type=str, default=\"Pong\", help=\"name of the game\")\n parser.add_argument(\"--seed\", type=int, default=42, help=\"which seed to use\")\n # Core DQN parameters\n parser.add_argument(\"--replay-buffer-size\", type=int, default=int(1e6), help=\"replay buffer size\")\n parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate for Adam optimizer\")\n parser.add_argument(\"--num-steps\", type=int, default=int(2e8), help=\"total number of steps to run the environment for\")\n parser.add_argument(\"--batch-size\", type=int, default=32, help=\"number of transitions to optimize at the same time\")\n parser.add_argument(\"--learning-freq\", type=int, default=4, help=\"number of iterations between every optimization step\")\n parser.add_argument(\"--target-update-freq\", type=int, default=40000, help=\"number of iterations between every target network update\")\n # Bells and whistles\n boolean_flag(parser, \"noisy\", default=False, help=\"whether or not to NoisyNetwork\")\n boolean_flag(parser, \"double-q\", default=True, help=\"whether or not to use double q learning\")\n boolean_flag(parser, \"dueling\", default=False, help=\"whether or not to use dueling model\")\n boolean_flag(parser, \"prioritized\", default=False, help=\"whether or not to use prioritized replay buffer\")\n parser.add_argument(\"--prioritized-alpha\", type=float, default=0.6, help=\"alpha parameter for prioritized replay buffer\")\n parser.add_argument(\"--prioritized-beta0\", type=float, default=0.4, help=\"initial value of beta parameters for prioritized replay\")\n parser.add_argument(\"--prioritized-eps\", type=float, default=1e-6, help=\"eps parameter for prioritized replay buffer\")\n # Checkpointing\n parser.add_argument(\"--save-dir\", type=str, default=None, required=True, help=\"directory in which training state and model should be saved.\")\n parser.add_argument(\"--save-azure-container\", type=str, default=None,\n help=\"It present data will saved/loaded from Azure. Should be in format ACCOUNT_NAME:ACCOUNT_KEY:CONTAINER\")\n parser.add_argument(\"--save-freq\", type=int, default=1e6, help=\"save model once every time this many iterations are completed\")\n boolean_flag(parser, \"load-on-start\", default=True, help=\"if true and model was previously saved then training will be resumed\")\n\n #V: Attack Arguments #\n parser.add_argument(\"--attack\", type=str, default=None, help=\"Method to attack the model.\")\n parser.add_argument(\"--attack-init\", type=int, default=0, help=\"Iteration no. 
to begin attacks\")\n parser.add_argument(\"--attack-prob\", type=float, default=0.0, help=\"Probability of attack at each step, float in range 0 - 1.0\" )\n return parser.parse_args()\n\n\ndef make_env(game_name):\n env = gym.make(game_name + \"NoFrameskip-v4\")\n monitored_env = SimpleMonitor(env) # puts rewards and number of steps in info, before environment is wrapped\n env = wrap_dqn(monitored_env) # applies a bunch of modification to simplify the observation space (downsample, make b/w)\n return env, monitored_env\n\n\ndef maybe_save_model(savedir, container, state):\n \"\"\"This function checkpoints the model and state of the training algorithm.\"\"\"\n if savedir is None:\n return\n start_time = time.time()\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n U.save_state(os.path.join(savedir, model_dir, \"saved\"))\n if container is not None:\n container.put(os.path.join(savedir, model_dir), model_dir)\n relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)\n if container is not None:\n container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')\n relatively_safe_pickle_dump(state[\"monitor_state\"], os.path.join(savedir, 'monitor_state.pkl'))\n if container is not None:\n container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')\n logger.log(\"Saved model in {} seconds\\n\".format(time.time() - start_time))\n\n\ndef maybe_load_model(savedir, container):\n \"\"\"Load model if present at the specified path.\"\"\"\n if savedir is None:\n return\n\n state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))\n if container is not None:\n logger.log(\"Attempting to download model from Azure\")\n found_model = container.get(savedir, 'training_state.pkl.zip')\n else:\n found_model = os.path.exists(state_path)\n if found_model:\n state = pickle_load(state_path, compression=True)\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n if container is not None:\n container.get(savedir, model_dir)\n U.load_state(os.path.join(savedir, model_dir, \"saved\"))\n logger.log(\"Loaded models checkpoint at {} iterations\".format(state[\"num_iters\"]))\n return state\n\n\nif __name__ == '__main__':\n args = parse_args()\n # Parse savedir and azure container.\n savedir = args.save_dir\n if args.save_azure_container is not None:\n account_name, account_key, container_name = args.save_azure_container.split(\":\")\n container = Container(account_name=account_name,\n account_key=account_key,\n container_name=container_name,\n maybe_create=True)\n if savedir is None:\n # Careful! This will not get cleaned up. 
Docker spoils the developers.\n savedir = tempfile.TemporaryDirectory().name\n else:\n container = None\n # Create and seed the env.\n env, monitored_env = make_env(args.env)\n if args.seed > 0:\n set_global_seeds(args.seed)\n env.unwrapped.seed(args.seed)\n\n # V: Save arguments, configure log dump path to savedir #\n if savedir:\n with open(os.path.join(savedir, 'args.json'), 'w') as f:\n json.dump(vars(args), f)\n logger.configure(dir=savedir) # log to savedir\n\n with U.make_session(4) as sess:\n # Create training graph and replay buffer\n act, train, update_target, debug, craft_adv = deepq.build_train(\n make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),\n q_func=dueling_model if args.dueling else model,\n num_actions=env.action_space.n,\n optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),\n gamma=0.99,\n grad_norm_clipping=10,\n double_q=args.double_q,\n noisy=args.noisy,\n attack = args.attack\n )\n approximate_num_iters = args.num_steps / 4\n exploration = PiecewiseSchedule([\n (0, 1.0),\n (approximate_num_iters / 50, 0.1),\n (approximate_num_iters / 5, 0.01)\n ], outside_value=0.01)\n\n if args.prioritized:\n replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)\n beta_schedule = LinearSchedule(approximate_num_iters, initial_p=args.prioritized_beta0, final_p=1.0)\n else:\n replay_buffer = ReplayBuffer(args.replay_buffer_size)\n\n U.initialize()\n update_target()\n num_iters = 0\n\n # Load the model\n state = maybe_load_model(savedir, container)\n if state is not None:\n num_iters, replay_buffer = state[\"num_iters\"], state[\"replay_buffer\"],\n monitored_env.set_state(state[\"monitor_state\"])\n\n start_time, start_steps = None, None\n steps_per_iter = RunningAvg(0.999)\n iteration_time_est = RunningAvg(0.999)\n obs = env.reset()\n # Record the mean of the \\sigma\n sigma_name_list = []\n sigma_list = []\n for param in tf.trainable_variables():\n # only record the \\sigma in the action network\n if 'sigma' in param.name and 'deepq/q_func/action_value' in param.name:\n summary_name = param.name.replace('deepq/q_func/action_value/', '').replace('/', '.').split(':')[0]\n sigma_name_list.append(summary_name)\n sigma_list.append(tf.reduce_mean(tf.abs(param)))\n f_mean_sigma = U.function(inputs=[], outputs=sigma_list)\n # Statistics\n writer = tf.summary.FileWriter(savedir, sess.graph)\n im_stats = statistics(scalar_keys=['action', 'im_reward', 'td_errors', 'huber_loss']+sigma_name_list)\n ep_stats = statistics(scalar_keys=['ep_reward', 'ep_length']) \n # Main trianing loop\n ep_length = 0\n while True:\n num_iters += 1\n ep_length += 1\n\n #V: Perturb observation if we are past the init stage and at a designated attack step #\n #if craft_adv != None and (num_iters >= args.attack_init) and ((num_iters - args.attack_init) % args.attack_freq == 0) : \n if craft_adv != None and (num_iters >= args.attack_init) and (random.random() <= args.attack_prob) : \n obs = craft_adv(np.array(obs)[None])[0]\n\n # Take action and store transition in the replay buffer.\n if args.noisy:\n # greedily choose\n action = act(np.array(obs)[None], stochastic=False)[0]\n else:\n # epsilon greedy\n action = act(np.array(obs)[None], update_eps=exploration.value(num_iters))[0]\n new_obs, rew, done, info = env.step(action)\n replay_buffer.add(obs, action, rew, new_obs, float(done))\n obs = new_obs\n if done:\n obs = env.reset()\n\n if (num_iters > max(5 * args.batch_size, args.replay_buffer_size // 20) and\n num_iters % 
args.learning_freq == 0):\n # Sample a bunch of transitions from replay buffer\n if args.prioritized:\n experience = replay_buffer.sample(args.batch_size, beta=beta_schedule.value(num_iters))\n (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience\n else:\n obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(args.batch_size)\n weights = np.ones_like(rewards)\n # Minimize the error in Bellman's equation and compute TD-error\n td_errors, huber_loss = train(obses_t, actions, rewards, obses_tp1, dones, weights)\n # Update the priorities in the replay buffer\n if args.prioritized:\n new_priorities = np.abs(td_errors) + args.prioritized_eps\n replay_buffer.update_priorities(batch_idxes, new_priorities)\n # Write summary\n mean_sigma = f_mean_sigma()\n im_stats.add_all_summary(writer, [action, rew, np.mean(td_errors), np.mean(huber_loss)]+mean_sigma, num_iters)\n\n # Update target network.\n if num_iters % args.target_update_freq == 0:\n update_target()\n\n if start_time is not None:\n steps_per_iter.update(info['steps'] - start_steps)\n iteration_time_est.update(time.time() - start_time)\n start_time, start_steps = time.time(), info[\"steps\"]\n\n # Save the model and training state.\n if num_iters > 0 and (num_iters % args.save_freq == 0 or info[\"steps\"] > args.num_steps):\n maybe_save_model(savedir, container, {\n 'replay_buffer': replay_buffer,\n 'num_iters': num_iters,\n 'monitor_state': monitored_env.get_state()\n })\n\n if info[\"steps\"] > args.num_steps:\n break\n\n if done:\n steps_left = args.num_steps - info[\"steps\"]\n completion = np.round(info[\"steps\"] / args.num_steps, 1)\n mean_ep_reward = np.mean(info[\"rewards\"][-100:])\n logger.record_tabular(\"% completion\", completion)\n logger.record_tabular(\"steps\", info[\"steps\"])\n logger.record_tabular(\"iters\", num_iters)\n logger.record_tabular(\"episodes\", len(info[\"rewards\"]))\n logger.record_tabular(\"reward (100 epi mean)\", np.mean(info[\"rewards\"][-100:]))\n if not args.noisy:\n logger.record_tabular(\"exploration\", exploration.value(num_iters))\n if args.prioritized:\n logger.record_tabular(\"max priority\", replay_buffer._max_priority)\n fps_estimate = (float(steps_per_iter) / (float(iteration_time_est) + 1e-6)\n if steps_per_iter._value is not None else \"calculating...\")\n logger.dump_tabular()\n logger.log()\n logger.log(\"ETA: \" + pretty_eta(int(steps_left / fps_estimate)))\n logger.log()\n # add summary for one episode\n ep_stats.add_all_summary(writer, [mean_ep_reward, ep_length], num_iters)\n ep_length = 0\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.ones_like", "numpy.abs", "numpy.round", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.trainable_variables", "numpy.array", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
heathher/neural_sequence_labeling
[ "81c83443982f5b1723fde3d446eb94e8cb7a4c44" ]
[ "utils/logger.py" ]
[ "import time\nimport sys\nimport logging\nimport numpy as np\n\n\ndef get_logger(filename):\n \"\"\"Return a logger instance that writes in filename\n Args:\n filename: (string) path to log.txt\n Returns:\n logger: (instance of logger)\n \"\"\"\n logger = logging.getLogger('logger')\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(format='%(message)s', level=logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logging.getLogger().addHandler(handler)\n return logger\n\n\nclass Progbar(object):\n \"\"\"Progbar class copied from keras (https://github.com/fchollet/keras/)\n Displays a progress bar.\n Small edit : added strict arg to update\n Arguments\n target: Total number of steps expected.\n interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n def __init__(self, target, width=30, verbose=1):\n self.width = width\n self.target = target\n self.sum_values = {}\n self.unique_values = []\n self.start = time.time()\n self.total_width = 0\n self.seen_so_far = 0\n self.verbose = verbose\n\n def update(self, current, values=None, exact=None, strict=None):\n \"\"\"Updates the progress bar.\n Arguments\n current: Index of current step.\n values: List of tuples (name, value_for_last_step).\n The progress bar will display averages for these values.\n exact: List of tuples (name, value_for_last_step).\n The progress bar will display these values directly.\n \"\"\"\n if strict is None:\n strict = []\n if exact is None:\n exact = []\n if values is None:\n values = []\n for k, v in values:\n if type(v) == int: # for global steps\n if k not in self.sum_values:\n self.unique_values.append(k)\n self.sum_values[k] = v\n else:\n self.sum_values[k] = v\n else:\n if k not in self.sum_values:\n self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]\n self.unique_values.append(k)\n else:\n self.sum_values[k][0] += v * (current - self.seen_so_far)\n self.sum_values[k][1] += (current - self.seen_so_far)\n for k, v in exact:\n if k not in self.sum_values:\n self.unique_values.append(k)\n self.sum_values[k] = [v, 1]\n\n for k, v in strict:\n if k not in self.sum_values:\n self.unique_values.append(k)\n self.sum_values[k] = v\n\n self.seen_so_far = current\n\n now = time.time()\n if self.verbose == 1:\n prev_total_width = self.total_width\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n numdigits = int(np.floor(np.log10(self.target))) + 1\n barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)\n bar = barstr % (current, self.target)\n prog = float(current)/self.target\n prog_width = int(self.width*prog)\n if prog_width > 0:\n bar += ('='*(prog_width-1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.'*(self.width-prog_width))\n bar += ']'\n sys.stdout.write(bar)\n self.total_width = len(bar)\n if current:\n time_per_unit = (now - self.start) / current\n else:\n time_per_unit = 0\n eta = time_per_unit*(self.target - current)\n info = ''\n if current < self.target:\n info += ' - ETA: %ds' % eta\n else:\n info += ' - %ds' % (now - self.start)\n for k in self.unique_values:\n if type(self.sum_values[k]) is list:\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n else:\n info += ' - %s: %s' % (k, self.sum_values[k])\n self.total_width += len(info)\n if prev_total_width > self.total_width:\n info += ((prev_total_width-self.total_width) * ' ')\n 
sys.stdout.write(info)\n sys.stdout.flush()\n if current >= self.target:\n sys.stdout.write(\"\\n\")\n if self.verbose == 2:\n if current >= self.target:\n info = '%ds' % (now - self.start)\n for k in self.unique_values:\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n sys.stdout.write(info + \"\\n\")\n\n def add(self, n, values=None):\n if values is None:\n values = []\n self.update(self.seen_so_far+n, values)\n" ]
[ [ "numpy.log10" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
macklenc/mtnlion
[ "ba2e93faeed3004d344a8c14f37a409da572271d" ]
[ "buildup/fenics_/phase2t/ce.py" ]
[ "import dolfin as fem\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom buildup import common, utilities\nfrom mtnlion.newman import equations\n\n\ndef run(start_time, dt, stop_time, return_comsol=False):\n time = np.arange(start_time, stop_time + dt, dt)\n dtc = fem.Constant(dt)\n cmn, domain, comsol = common.prepare_comsol_buildup()\n\n comsol_ce = utilities.interp_time(comsol.time_mesh, comsol.data.ce)\n comsol_phis = utilities.interp_time(comsol.time_mesh, comsol.data.phis)\n comsol_phie = utilities.interp_time(comsol.time_mesh, comsol.data.phie)\n comsol_cse = utilities.interp_time(comsol.time_mesh, comsol.data.cse)\n\n ce_sol = utilities.create_solution_matrices(len(time), len(comsol.mesh), 1)[0]\n ce_u = fem.TrialFunction(domain.V)\n v = fem.TestFunction(domain.V)\n\n phis_c, phie_c, ce_c_, ce_c_1, cse_c = utilities.create_functions(domain.V, 5)\n\n de_eff = cmn.fenics_params.De_eff\n Lc = cmn.fenics_params.L\n n = domain.n\n dS = domain.dS\n\n neumann = (\n de_eff(\"-\") / Lc(\"-\") * fem.inner(fem.grad(ce_c_(\"-\")), n(\"-\")) * v(\"-\") * dS(2)\n + de_eff(\"+\") / Lc(\"+\") * fem.inner(fem.grad(ce_c_(\"+\")), n(\"+\")) * v(\"+\") * dS(2)\n + de_eff(\"-\") / Lc(\"-\") * fem.inner(fem.grad(ce_c_(\"-\")), n(\"-\")) * v(\"-\") * dS(3)\n + de_eff(\"+\") / Lc(\"+\") * fem.inner(fem.grad(ce_c_(\"+\")), n(\"+\")) * v(\"+\") * dS(3)\n )\n\n # Uocp = equations.Uocp(cse_c, **cmn.fenics_params)\n Uocp = equations.Uocp_interp(\n cmn.Uocp_spline.Uocp_neg, cmn.Uocp_spline.Uocp_pos, cse_c, cmn.fenics_params.csmax, utilities\n )\n j = equations.j(\n ce_c_,\n cse_c,\n phie_c,\n phis_c,\n Uocp,\n **cmn.fenics_params,\n **cmn.fenics_consts,\n dm=domain.domain_markers,\n V=domain.V,\n )\n\n euler = equations.euler(ce_c_, ce_c_1, dtc)\n lhs, rhs1, rhs2 = equations.ce(j, ce_c_, v, **cmn.fenics_params, **cmn.fenics_consts)\n F = (lhs * euler - rhs1) * domain.dx - rhs2 * domain.dx((0, 2)) + neumann\n J = fem.derivative(F, ce_c_, ce_u)\n problem = fem.NonlinearVariationalProblem(F, ce_c_, J=J)\n solver = fem.NonlinearVariationalSolver(problem)\n\n prm = solver.parameters\n prm[\"newton_solver\"][\"absolute_tolerance\"] = 1e-8\n prm[\"newton_solver\"][\"relative_tolerance\"] = 1e-7\n prm[\"newton_solver\"][\"maximum_iterations\"] = 5000\n prm[\"newton_solver\"][\"relaxation_parameter\"] = 0.18\n\n if start_time < dt:\n ce_c_1.assign(cmn.fenics_consts.ce0)\n else:\n utilities.assign_functions([comsol_ce(start_time)], [ce_c_1], domain.V, ...)\n\n ce_sol[0, :] = utilities.get_1d(ce_c_1, domain.V)\n ce_c_.assign(ce_c_1)\n\n for k, t in enumerate(time[1:], 1):\n utilities.assign_functions(\n [comsol_cse(t), comsol_phis(t), comsol_phie(t)], [cse_c, phis_c, phie_c], domain.V, Ellipsis\n )\n\n iterations, converged = solver.solve()\n\n ce_c_1.assign(ce_c_)\n\n ce_sol[k, :] = utilities.get_1d(ce_c_, domain.V)\n print(\n \"t={time:.3f}: num iterations: {iter}, error = {error:.4e}\".format(\n time=t, iter=iterations, error=np.abs(ce_sol[k, :] - comsol_ce(t)).max()\n )\n )\n\n if return_comsol:\n return utilities.interp_time(time, ce_sol), comsol\n else:\n return utilities.interp_time(time, ce_sol)\n\n\ndef main(start_time=None, dt=None, stop_time=None, plot_time=None, get_test_stats=False):\n # Quiet\n fem.set_log_level(fem.LogLevel.ERROR)\n\n # Times at which to run solver\n if start_time is None:\n start_time = 0\n if stop_time is None:\n stop_time = 50\n if dt is None:\n dt = 0.1\n if plot_time is None:\n plot_time = np.arange(start_time, stop_time, (stop_time - start_time) / 10)\n\n ce_sol, comsol = 
run(start_time, dt, stop_time, return_comsol=True)\n comsol_ce = utilities.interp_time(comsol.time_mesh, comsol.data.ce)\n\n if not get_test_stats:\n utilities.report(comsol.mesh, plot_time, ce_sol(plot_time), comsol_ce(plot_time), \"$c_e$\")\n utilities.save_plot(__file__, \"plots/compare_ce_euler.png\")\n\n plt.show()\n else:\n data = utilities.generate_test_stats(plot_time, comsol, ce_sol, comsol_ce)\n\n return data\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sk1010k/nussl
[ "5dd5db75138af834c672eafb10e02f4448ddca37" ]
[ "nussl/separation/repet.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThe original REpeating Pattern Extraction Technique (REPET).\n\"\"\"\n\nimport numpy as np\nimport scipy.fftpack as scifft\nimport scipy.spatial.distance\n\nfrom . import mask_separation_base\nfrom . import masks\nfrom ..core import constants\n\n\nclass Repet(mask_separation_base.MaskSeparationBase):\n \"\"\"Implements the original REpeating Pattern Extraction Technique algorithm using the beat spectrum.\n\n REPET is a simple method for separating a repeating background from a non-repeating foreground in an\n audio mixture. It assumes a single repeating period over the whole signal duration, and finds that\n period based on finding a peak in the beat spectrum. The period can also be provided exactly, or you\n can give ``Repet`` a guess of the min and max period. Once it has a period, it \"overlays\" spectrogram\n sections of length ``period`` to create a median model (the background).\n\n References:\n * Zafar Rafii and Bryan Pardo. \"Audio Separation System and Method,\" US20130064379 A1, US 13/612,413, March 14,\n 2013\n\n See Also:\n http://music.eecs.northwestern.edu/research.php?project=repet\n :ref:`The REPET Demo Example <repet_demo>`\n :class:`separation.repet_sim.RepetSim`\n\n Parameters:\n input_audio_signal (:class:`audio_signal.AudioSignal`): The :class:`audio_signal.AudioSignal` object that\n REPET will be run on. This makes a copy of ``input_audio_signal``\n min_period (float, optional): minimum time to look for repeating period in terms of seconds.\n max_period (float, optional): maximum time to look for repeating period in terms of seconds.\n period (float, optional): exact time that the repeating period is (in seconds).\n high_pass_cutoff (float, optional): value (in Hz) for the high pass cutoff filter.\n do_mono (bool, optional): Flattens :class:`audio_signal.AudioSignal` to mono before running the \n algorithm (does not effect the input :class:`audio_signal.AudioSignal` object).\n use_find_period_complex (bool, optional): Will use a more complex peak picker to find the repeating period.\n use_librosa_stft (bool, optional): Calls librosa's stft function instead of nussl's\n matlab_fidelity (bool, optional): If True, does repet with the same settings as the original MATLAB\n implementation of REPET, warts and all. This will override ``use_librosa_stft`` and set\n it to ``False``.\n\n Examples:\n \n\n Attributes:\n background (:class:`audio_signal.AudioSignal`): Calculated background. This is ``None`` until :func:`run()` is \n called.\n foreground (:class:`audio_signal.AudioSignal`): Calculated foreground. 
This is ``None`` until \n :func:`make_audio_signals()` is called.\n beat_spectrum (:obj:`np.array`): Beat spectrum calculated by Repet.\n use_find_period_complex (bool): Determines whether to use complex peak picker to find the repeating period.\n repeating_period (int): Repeating period in units of hops (stft time bins)\n stft (:obj:`np.ndarray`): Local copy of the STFT input from ``input_audio_array``\n mangitude_spectrogram (:obj:`np.ndarray`): Local copy of the magnitude spectrogram\n\n \"\"\"\n def __init__(self, input_audio_signal, min_period=None, max_period=None, period=None, high_pass_cutoff=100.0,\n do_mono=False, use_find_period_complex=False,\n use_librosa_stft=constants.USE_LIBROSA_STFT, matlab_fidelity=False,\n mask_type=mask_separation_base.MaskSeparationBase.SOFT_MASK, mask_threshold=0.5):\n super(Repet, self).__init__(input_audio_signal=input_audio_signal, mask_type=mask_type,\n mask_threshold=mask_threshold)\n\n # Check input parameters\n if (min_period or max_period) and period:\n raise ValueError('Cannot set both period and (min_period or max_period)!')\n\n self.high_pass_cutoff = float(high_pass_cutoff)\n self.background = None\n self.foreground = None\n self.beat_spectrum = None\n self.use_find_period_complex = use_find_period_complex\n self.use_librosa_stft = use_librosa_stft\n\n self.repeating_period = None\n self.magnitude_spectrogram = None\n self.stft = None\n self.matlab_fidelity = matlab_fidelity\n self._is_period_converted_to_hops = False\n\n if self.matlab_fidelity:\n self.use_librosa_stft = False\n\n # TODO: stereo doesn't do true stereo REPET (see TODO below)\n if do_mono:\n self.audio_signal.to_mono(overwrite=True)\n\n # Set period parameters\n self.min_period, self.max_period, self.period = None, None, None\n if period is None:\n self.min_period = 0.8 if min_period is None else min_period\n self.max_period = min(8, self.audio_signal.signal_duration / 3) if max_period is None else max_period\n else:\n self.period = period\n if not self._is_period_converted_to_hops:\n self.period = self._update_period(self.period)\n self._is_period_converted_to_hops = True\n\n def run(self):\n \"\"\" Runs the original REPET algorithm\n\n Returns:\n masks (:obj:`MaskBase`): A :obj:`MaskBase`-derived object with repeating background time-frequency data.\n (to get the corresponding non-repeating foreground run :func:`make_audio_signals`)\n\n Example:\n \n .. code-block:: python\n :linenos:\n \n signal = nussl.AudioSignal(path_to_input_file='input_name.wav')\n\n # Set up and run Repet\n repet = nussl.Repet(signal) # Returns a soft mask by default\n masks = repet.run() # or repet()\n\n # Get audio signals\n background, foreground = repet.make_audio_signals()\n\n # output the background\n background.write_audio_to_file('background.wav')\n\n \"\"\"\n # High pass filter cutoff freq. (in # of freq. 
bins), +1 to match MATLAB implementation\n self.high_pass_cutoff = int(np.ceil(self.high_pass_cutoff * (self.stft_params.n_fft_bins - 1) /\n self.audio_signal.sample_rate)) + 1\n\n # the MATLAB implementation had\n low = 1 if self.matlab_fidelity else 0\n\n self._compute_spectrograms()\n self.repeating_period = self._calculate_repeating_period()\n\n # separate the mixture background by masking\n background_stft = []\n background_mask = []\n for i in range(self.audio_signal.num_channels):\n repeating_mask = self._compute_repeating_mask(self.magnitude_spectrogram[:, :, i])\n\n repeating_mask[low:self.high_pass_cutoff, :] = 1 # high-pass filter the foreground\n background_mask.append(repeating_mask)\n\n # apply mask\n stft_with_mask = repeating_mask * self.stft[:, :, i]\n background_stft.append(stft_with_mask)\n\n # make a new audio signal for the background\n background_stft = np.array(background_stft).transpose((1, 2, 0))\n self._make_background_signal(background_stft)\n\n # make a mask and return\n background_mask = np.array(background_mask).transpose((1, 2, 0))\n background_mask = masks.SoftMask(background_mask)\n if self.mask_type == self.BINARY_MASK:\n background_mask = background_mask.mask_to_binary(self.mask_threshold)\n\n self.result_masks = [background_mask, background_mask.inverse_mask()]\n\n return self.result_masks\n\n def _compute_spectrograms(self):\n self.stft = self.audio_signal.stft(overwrite=True, remove_reflection=True, use_librosa=self.use_librosa_stft)\n self.magnitude_spectrogram = np.abs(self.stft)\n\n def get_beat_spectrum(self, recompute_stft=False):\n \"\"\"Calculates and returns the beat spectrum for the audio signal associated with this object\n\n Args:\n recompute_stft (bool, Optional): Recompute the stft for the audio signal\n\n Returns:\n beat_spectrum (np.array): beat spectrum for the audio file\n\n Example:\n\n .. code-block:: python\n :linenos:\n \n # Set up audio signal\n signal = nussl.AudioSignal('path_to_file.wav')\n\n # Set up a Repet object\n repet = nussl.Repet(signal)\n\n # I don't have to run repet to get a beat spectrum for signal\n beat_spec = repet.get_beat_spectrum()\n \n \"\"\"\n if recompute_stft or self.magnitude_spectrogram is None:\n self._compute_spectrograms()\n\n # TODO: Make this multi-channel. 
The np.mean() reduces the n channels to 1.\n self.beat_spectrum = self.compute_beat_spectrum(np.mean(np.square(self.magnitude_spectrogram),\n axis=self.audio_signal._STFT_CHAN).T)\n return self.beat_spectrum\n\n def _calculate_repeating_period(self):\n # user provided a period, so no calculations to do\n if self.period is not None:\n return self.period\n\n # get beat spectrum\n self.beat_spectrum = self.get_beat_spectrum()\n\n if self.use_find_period_complex:\n self.repeating_period = self.find_repeating_period_complex(self.beat_spectrum)\n else:\n # update the min and max so they're in units of time bin indices\n if not self._is_period_converted_to_hops:\n self.min_period = self._update_period(self.min_period)\n self.max_period = self._update_period(self.max_period)\n self._is_period_converted_to_hops = True\n\n self.repeating_period = self.find_repeating_period_simple(self.beat_spectrum,\n self.min_period, self.max_period)\n\n return self.repeating_period\n\n @staticmethod\n def compute_beat_spectrum(power_spectrogram):\n \"\"\" Computes the beat spectrum averages (over freq's) the autocorrelation matrix of a one-sided spectrogram.\n\n The autocorrelation matrix is computed by taking the autocorrelation of each row of the spectrogram and\n dismissing the symmetric half.\n\n Args:\n power_spectrogram (:obj:`np.array`): 2D matrix containing the one-sided power spectrogram of an audio signal\n \n Returns:\n (:obj:`np.array`): array containing the beat spectrum based on the power spectrogram\n \n See Also:\n J Foote's original derivation of the Beat Spectrum: \n Foote, Jonathan, and Shingo Uchihashi. \"The beat spectrum: A new approach to rhythm analysis.\" \n Multimedia and Expo, 2001. ICME 2001. IEEE International Conference on. IEEE, 2001.\n (`See PDF here <http://rotorbrain.com/foote/papers/icme2001.pdf>`_)\n \n \"\"\"\n freq_bins, time_bins = power_spectrogram.shape\n\n # row-wise autocorrelation according to the Wiener-Khinchin theorem\n power_spectrogram = np.vstack([power_spectrogram, np.zeros_like(power_spectrogram)])\n fft_power_spec = scifft.fft(power_spectrogram, axis=0)\n abs_fft = np.abs(fft_power_spec) ** 2\n autocorrelation_rows = np.real(scifft.ifft(abs_fft, axis=0)[:freq_bins, :]) # ifft over columns\n\n # normalization factor\n norm_factor = np.tile(np.arange(freq_bins, 0, -1), (time_bins, 1)).T\n autocorrelation_rows = autocorrelation_rows / norm_factor\n\n # compute the beat spectrum\n beat_spectrum = np.mean(autocorrelation_rows, axis=1) # average over frequencies\n\n return beat_spectrum\n\n @staticmethod\n def find_repeating_period_simple(beat_spectrum, min_period, max_period):\n \"\"\"Computes the repeating period of the sound signal using the beat spectrum.\n This algorithm just looks for the max value in the interval ``[min_period, max_period]``, inclusive.\n It discards the first value, and returns the period in units of stft time bins.\n\n Parameters:\n beat_spectrum (:obj:`np.array`): input beat spectrum array\n min_period (int): minimum possible period value\n max_period (int): maximum possible period value\n \n Returns:\n period (int): The period of the sound signal in stft time bins\n \n See Also:\n :func:`find_repeating_period_complex`\n \n \"\"\"\n min_period, max_period = int(min_period), int(max_period)\n beat_spectrum = beat_spectrum[1:] # discard the first element of beat_spectrum (lag 0)\n beat_spectrum = beat_spectrum[min_period - 1: max_period]\n\n if len(beat_spectrum) == 0:\n raise RuntimeError('min_period is larger than the beat spectrum!')\n\n 
period = np.argmax(beat_spectrum) + min_period\n\n return period\n\n @staticmethod\n def find_repeating_period_complex(beat_spectrum):\n \"\"\" A more complicated approach to finding the repeating period. Use this by setting \n :attr:`use_find_period_complex`\n \n Args:\n beat_spectrum (:obj:`np.array`): input beat spectrum array\n\n Returns:\n period (int): The period of the sound signal in stft time bins\n \n See Also:\n :func:`find_repeating_period_simple`\n \n \"\"\"\n auto_cosine = np.zeros((len(beat_spectrum), 1))\n\n for i in range(0, len(beat_spectrum) - 1):\n auto_cosine[i] = 1 - scipy.spatial.distance.cosine(beat_spectrum[0:len(beat_spectrum) - i],\n beat_spectrum[i:len(beat_spectrum)])\n\n ac = auto_cosine[0:np.floor(auto_cosine.shape[0])/2]\n auto_cosine = np.vstack([ac[1], ac, ac[-2]])\n auto_cosine_diff = np.ediff1d(auto_cosine)\n sign_changes = auto_cosine_diff[0:-1]*auto_cosine_diff[1:]\n sign_changes = np.where(sign_changes < 0)[0]\n\n extrema_values = ac[sign_changes]\n\n e1 = np.insert(extrema_values, 0, extrema_values[0])\n e2 = np.insert(extrema_values, -1, extrema_values[-1])\n\n extrema_neighbors = np.stack((e1[0:-1], e2[1:]))\n\n m = np.amax(extrema_neighbors, axis=0)\n extrema_values = extrema_values.flatten()\n maxima = np.where(extrema_values >= m)[0]\n maxima = zip(sign_changes[maxima], extrema_values[maxima])\n maxima = maxima[1:]\n maxima = sorted(maxima, key=lambda x: -x[1])\n period = maxima[0][0]\n\n return period\n\n def _compute_repeating_mask(self, magnitude_spectrogram_channel):\n \"\"\"Computes the soft mask for the repeating part using the magnitude spectrogram and the repeating period\n\n Parameters:\n magnitude_spectrogram_channel (:obj:`np.array`): 2D matrix containing the magnitude spectrogram of a signal\n \n Returns:\n (:obj:`np.array`): 2D matrix (Lf by Lt) containing the soft mask for the repeating part, elements of M \n take on values in ``[0, 1]``\n\n \"\"\"\n # this +1 is a kluge to make this implementation match the original MATLAB implementation\n period = self.repeating_period + 1\n freq_bins, time_bins = magnitude_spectrogram_channel.shape\n n_repetitions = int(np.ceil(float(time_bins) / period))\n one_period = freq_bins * period\n\n # Pad to make an integer number of repetitions. Pad with 'nan's to not affect the median.\n remainder = (period * n_repetitions) % time_bins\n mask_reshaped = np.hstack([magnitude_spectrogram_channel, float('nan') * np.zeros((freq_bins, remainder))])\n\n # reshape to take the median of each period\n mask_reshaped = np.reshape(mask_reshaped.T, (n_repetitions, one_period))\n\n # take median of repeating periods before and after the padding\n median_mask = np.nanmedian(mask_reshaped, axis=0)\n\n # reshape to it's original shape\n median_mask = np.reshape(np.tile(median_mask, (n_repetitions, 1)), (n_repetitions * period, freq_bins)).T\n median_mask = median_mask[:, :time_bins]\n\n # take minimum of computed mask and original input and scale\n min_median_mask = np.minimum(median_mask, magnitude_spectrogram_channel)\n mask = (min_median_mask + constants.EPSILON) / (magnitude_spectrogram_channel + constants.EPSILON)\n\n return mask\n\n def update_periods(self):\n \"\"\" Will update periods for use with :func:`find_repeating_period_simple`.\n\n Updates from seconds to stft time bin values.\n Call this if you haven't done :func:`run()` or else you won't get good results.\n\n Example:\n \n .. 
code-block:: python\n :linenos:\n\n a = nussl.AudioSignal('path/to/file.wav')\n r = nussl.Repet(a)\n\n beat_spectrum = r.get_beat_spectrum()\n r.update_periods()\n repeating_period = r.find_repeating_period_simple(beat_spectrum, r.min_period, r.max_period)\n\n \"\"\"\n if self._is_period_converted_to_hops:\n self.period = self._update_period(self.period) if self.period is not None else None\n self.min_period = self._update_period(self.min_period) if self.min_period is not None else None\n self.max_period = self._update_period(self.max_period) if self.max_period is not None else None\n self._is_period_converted_to_hops = True\n\n def _update_period(self, period):\n period = float(period)\n result = period * self.audio_signal.sample_rate\n result += self.stft_params.window_length / self.stft_params.window_overlap - 1\n result /= self.stft_params.window_overlap\n return int(np.ceil(result))\n\n def _make_background_signal(self, background_stft):\n self.background = self.audio_signal.make_copy_with_stft_data(background_stft, verbose=False)\n self.background.istft(self.stft_params.window_length, self.stft_params.hop_length, self.stft_params.window_type,\n overwrite=True, use_librosa=self.use_librosa_stft,\n truncate_to_length=self.audio_signal.signal_length)\n\n def plot(self, output_file, **kwargs):\n \"\"\"\n Creates a plot of the beat spectrum and outputs to output_file.\n\n Parameters:\n output_file (string) : string representing a path to the desired output file to be created.\n title: (string) Title to put on the plot\n show_repeating_period: (bool) if True, then adds a vertical line where repet things\n the repeating period is (if the repeating period has been computed already)\n\n Example:\n \n .. code-block:: python\n :linenos:\n\n signal = nussl.AudioSignal('Sample.wav')\n repet = nussl.Repet(signal)\n\n repet.plot('new_beat_spec_plot.png', title=\"Beat Spectrum of Sample.wav\", show_repeating_period=True)\n \"\"\"\n import matplotlib.pyplot as plt\n plt.close('all')\n title = None\n show_repeating_period = False\n\n if len(kwargs) != 0:\n if 'title' in kwargs:\n title = kwargs['title']\n if 'show_repeating_period' in kwargs:\n show_repeating_period = kwargs['show_repeating_period']\n\n beat_spec = self.get_beat_spectrum()\n time_vect = np.linspace(0.0, self.audio_signal.signal_duration, num=len(beat_spec))\n plt.plot(time_vect, beat_spec)\n\n if self.repeating_period is not None and show_repeating_period:\n stft_vector = np.linspace(0.0, self.audio_signal.signal_duration, self.audio_signal.stft_length)\n rep = stft_vector[self.repeating_period]\n plt.plot((rep, rep), (0, np.max(beat_spec)), 'g--', label='Repeating period')\n # plt.plot((self.repeating_period, self.repeating_period), (-1e20, 1e20), 'g--')\n plt.ylim((0.0, np.max(beat_spec) * 1.1))\n\n title = title if title is not None else 'Beat Spectrum for {}'.format(self.audio_signal.file_name)\n plt.title(title)\n\n plt.xlabel('Time (s)')\n plt.ylabel('Beat Strength')\n plt.grid('on')\n\n plt.axis('tight')\n plt.savefig(output_file)\n\n def make_audio_signals(self):\n \"\"\" Returns the background and foreground audio signals. You must have run :func:`run()` prior\n to calling this function. This function will return ``None`` if :func:`run()` has not been called.\n \n Order of the list is ``[self.background, self.foreground]`` \n\n Returns:\n (list): List containing two :class:`audio_signal.AudioSignal` objects, one for the calculated background\n and the next for the remaining foreground, in that order.\n\n Example:\n \n .. 
code-block:: python\n :linenos:\n \n # set up AudioSignal object\n signal = nussl.AudioSignal('path_to_file.wav')\n\n # set up and run repet\n repet = nussl.Repet(signal)\n repet.run()\n\n # get audio signals (AudioSignal objects)\n background, foreground = repet.make_audio_signals()\n \n \"\"\"\n if self.background is None:\n raise ValueError('Cannot make audio signals prior to running algorithm!')\n\n foreground_array = self.audio_signal.audio_data - self.background.audio_data\n self.foreground = self.audio_signal.make_copy_with_audio_data(foreground_array)\n return [self.background, self.foreground]\n" ]
[ [ "numpy.amax", "numpy.nanmedian", "numpy.minimum", "numpy.linspace", "scipy.fftpack.fft", "matplotlib.pyplot.plot", "numpy.max", "numpy.mean", "numpy.zeros_like", "numpy.where", "numpy.square", "numpy.reshape", "numpy.arange", "numpy.stack", "numpy.ceil", "numpy.argmax", "numpy.insert", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.zeros", "scipy.fftpack.ifft", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.floor", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.ediff1d", "numpy.tile", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
BrianCoyle/IsingBornMachinePseudoPublic
[ "e7e6e87fd2eeca39cea65504c8b95242f898a92f" ]
[ "param_init.py" ]
[ "## @package param_init\r\n#\r\n# Initialise some inputted variables\r\n\r\nfrom pyquil.quil import Program\r\nfrom pyquil.api import get_qc\r\nimport pyquil.paulis as pl\r\nfrom pyquil.gates import H, CPHASE, PHASE, RESET, MEASURE\r\nimport numpy as np\r\nimport random as rand\r\nfrom numpy import pi,log2\r\n\r\ndef HadamardToAll(prog, qubits):\r\n\t#qubits is an ordered list of the qubits available in the chip\r\n\tfor qubit_index in qubits:\r\n\t\tprog.inst(H(qubit_index))\r\n\treturn prog\r\n\r\n\r\n## Initialise weights and biases as random\r\n#\r\n# This function computes the initial parameter values, J, b randomly chosen on interval [0, pi/4], gamma, delta set to constant = pi/4 if untrained\r\n#\r\n# @param[in] qc The Rigetti QuantumComputer Object that is chosen, e.g. 'Aspen-1-2Q-B'\r\n#\r\n# @return initialised parameters\r\ndef NetworkParams(qc, random_seed):\r\n\r\n\tN_qubits = len(qc.qubits())\r\n\r\n #Initialise arrays for parameters\r\n\r\n\tJ \t\t\t\t\t\t\t= np.zeros((N_qubits, N_qubits))\r\n\t[b, gamma, delta, sigma]\t= [np.zeros((N_qubits)) for _ in range(4)]\r\n\r\n\r\n #Set random seed to be fixed for reproducibility, set random_seed differently depending on whether quantum data\r\n\t#is generated, or whether the actual Born machine is being used.\r\n\trand.seed(random_seed)\r\n\tfor j in range(0, N_qubits):\r\n\t\tb[j] = rand.uniform(0, pi/4)\r\n\t\t# If delta to be trained also and variable for each qubit\r\n\t\t# rand.seed(j+N)\r\n\t\t# gamma[j] = rand.uniform(0,pi/4)\r\n\t\t#If delta to be trained also and variable for each qubit\r\n\t\t#delta[j] = uniform(0,pi/4)\r\n\r\n\t\tgamma[j] = pi/4\t\t\t#If gamma constant for all qubits\r\n\t\tdelta[j] = pi/4 \t\t#If delta constant for all qubits\r\n\t\tsigma[j] = pi/4 \t\t#If sigma constant for all qubits\r\n\r\n\t\tfor i in range(0, N_qubits):\t\r\n\t\t\tif i < j:\r\n\t\t\t\tJ[i][j] = rand.uniform(0, pi/4)\r\n\t\t\t\tJ[j][i] = J[i][j]\r\n\r\n\tinitial_params = {'J': J, 'b': b, 'gamma': gamma, 'delta': delta, 'sigma': sigma}\r\n\r\n\treturn initial_params\r\n\r\ndef NetworkParamsSingleQubitGates(qc, layers):\r\n\t'''This function initilises single-qubit trainable parameters'''\r\n\r\n\tN_qubits = len(qc.qubits())\r\n\t\r\n\t#Initialise arrays for parameters\r\n\tsingle_qubit_params\t= np.zeros(N_qubits, N_qubits, N_qubits, layers) \r\n\t#layers is the number of single qubit layers, each 'layer', l, consists of three gates, R_z(\\theta_l^1)R_x(\\theta_l^2)R_x(\\theta_l^3)\r\n\r\n\t#Set random seed to be fixed for reproducibility\r\n\trand.seed(0)\r\n\tfor j in range(0, N_qubits):\r\n\t\tfor l in range(0, layers):\r\n\t\t\t#initialise all single qubit gates at random\r\n\t\t\tsingle_qubit_params[j, :, :, l] = rand.uniform(0,pi/4)\r\n\t\t\tsingle_qubit_params[:, j, :, l] = rand.uniform(0,pi/4)\r\n\t\t\tsingle_qubit_params[:, :, j, l] = rand.uniform(0,pi/4)\r\n\r\n\treturn single_qubit_params\r\n\r\n# Initialise Quantum State created after application of gate sequence\r\ndef StateInit(qc, circuit_params, p, q, r, s, circuit_choice, control, sign):\r\n\t\t'''This function computes the state produced after the given circuit, either QAOA, IQP, or IQPy,\r\n\t\tdepending on the value of circuit_choice.'''\r\n\r\n\t\t#sign = 'POSITIVE' for the positive probability version, sign = 'NEGATIVE' for the negative version of the probability (only used to compute the gradients)\r\n\t\t#final_layer is either 'IQP', 'QAOA', 'IQPy' for IQP (Final Hadamard), QAOA (Final X rotation) or IQPy (Final Y rotation)\r\n\t\t#control = 'BIAS' for 
updating biases, = 'WEIGHTS' for updating weights, = 'NEITHER' for neither\r\n\r\n\t\t#Initialise empty quantum program, with QuantumComputer Object, and Wavefunction Simulator\r\n\t\t'''With Active qubit reset'''\r\n\t\t# prog = Program(RESET())\r\n\t\t'''Without Active qubit reset'''\r\n\t\tprog = Program()\r\n\r\n\t\tqubits = qc.qubits()\r\n\t\tN_qubits = len(qubits)\r\n\t\t#Unpack circuit parameters from dictionary\r\n\t\tJ = circuit_params['J']\r\n\t\tb = circuit_params['b']\r\n\t\tgamma = circuit_params['gamma']\r\n\t\tdelta = circuit_params['delta']\r\n\r\n\t\t#Apply hadarmard to all qubits in computation\r\n\t\tprog = HadamardToAll(prog, qubits)\r\n\r\n\t\t# print(qc.name)\r\n\r\n\t\t#Apply Control-Phase(4J) gates to each qubit, the factor of 4 comes from the decomposition of the Ising gate\r\n\t\t#with local Z corrections to neighbouring qubits, coming from the decomposition of the Ising gate\r\n\t\t#If weight J_{p,q} is updated, add a +/- pi/2 rotation\r\n\t\tif qc.name.lower() == 'aspen-3-3q-b-qvm':\r\n\t\t\t''''Specific entanglement structure for Rigetti Aspen-3-2Q-C'''\r\n\t\t\tif (control.lower() == 'weights' and p == 0 and q == 1):\r\n\t\t\t\t#first weight parameter between qubit[1] and qubit[2]\r\n\t\t\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1] + (-1)**(sign)*pi/2, qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[1]))\r\n\r\n\t\t\telif (control.lower() == 'weights' and p == 1 and q == 2):\r\n\t\t\t\t#Second weight parameter between qubit[1] and qubit[2]\r\n\t\t\t\tprog.inst(CPHASE(4*J[1, 2] + (-1)**(sign)*pi/2, qubits[1], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2] + (-1)**(sign)*pi/2, qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2] + (-1)**(sign)*pi/2, qubits[2]))\r\n\r\n\t\t\telif (control.lower() == 'neither' or 'bias' or 'gamma' and sign.lower() == 'neither'):\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1], qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[1]))\r\n\r\n\t\t\t\tprog.inst(CPHASE(4*J[1, 2], qubits[1], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2], qubits[2]))\r\n\r\n\t\telif qc.name.lower() =='aspen-4-3q-a' or qc.name.lower() == 'aspen-4-3q-a-qvm':\r\n\r\n\t\t\t''''Specific entanglement structure for Rigetti Aspen-4-3Q-A\r\n\t\t\t17 - 10 - 11\r\n\t\t\t'''\r\n\t\t\tif (control.lower() == 'weights' and p == 0 and q == 1):\r\n\t\t\t\t#first weight parameter between qubit[1] and qubit[2]\r\n\t\t\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1] + (-1)**(sign)*pi/2, qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[1]))\r\n\r\n\t\t\telif (control.lower() == 'weights' and p == 1 and q == 2):\r\n\t\t\t\t#Second weight parameter between qubit[1] and qubit[2]\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 2] + (-1)**(sign)*pi/2, qubits[0], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 2] + (-1)**(sign)*pi/2, qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 2] + (-1)**(sign)*pi/2, qubits[2]))\r\n\r\n\t\t\telif (control.lower() == 'neither' or 'bias' or 'gamma' and sign.lower() == 'neither'):\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1], qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[1]))\r\n\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 2], qubits[0], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 2], 
qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 2], qubits[2]))\r\n\r\n\t\telif qc.name.lower() =='aspen-4-4q-a' or qc.name.lower() == 'aspen-4-4q-a-qvm':\r\n\t\t\t''''\r\n\t\t\tSpecific entanglement structure for Rigetti Aspen-4-4Q-A \r\n\t\t\t7 - 0 - 1 - 2\r\n\t\t\t'''\r\n\t\t\tif (control.lower() == 'weights' and p == 0 and q == 1):\r\n\t\t\t\t#first weight parameter between qubit[1] and qubit[2]\r\n\t\t\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1] + (-1)**(sign)*pi/2, qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1] + (-1)**(sign)*pi/2, qubits[1]))\r\n\r\n\t\t\telif (control.lower() == 'weights' and p == 1 and q == 2):\r\n\t\t\t\t#Second weight parameter between qubit[1] and qubit[2]\r\n\t\t\t\tprog.inst(CPHASE(4*J[1, 2] + (-1)**(sign)*pi/2, qubits[1], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2] + (-1)**(sign)*pi/2, qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2] + (-1)**(sign)*pi/2, qubits[2]))\r\n\r\n\t\t\telif (control.lower() == 'weights' and p == 0 and q == 3):\r\n\t\t\t\t#Second weight parameter between qubit[1] and qubit[2]\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 3] + (-1)**(sign)*pi/2, qubits[0], qubits[3]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 3] + (-1)**(sign)*pi/2, qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 3] + (-1)**(sign)*pi/2, qubits[3]))\r\n\r\n\t\t\telif (control.lower() == 'neither' or 'bias' or 'gamma' and sign.lower() == 'neither'):\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 1], qubits[0], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 1], qubits[1]))\r\n\r\n\t\t\t\tprog.inst(CPHASE(4*J[1, 2], qubits[1], qubits[2]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2], qubits[1]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[1, 2], qubits[2]))\r\n\t\t\t\t\r\n\t\t\t\tprog.inst(CPHASE(4*J[0, 3], qubits[0], qubits[3]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 3], qubits[0]))\r\n\t\t\t\tprog.inst(PHASE(-2*J[0, 3], qubits[3]))\r\n\t\telse:\r\n\t\t\tfor j in range(0, N_qubits):\r\n\t\t\t\tfor i in range(0, N_qubits):\r\n\t\t\t\t\t\tif (i < j): #connection is symmetric, so don't overcount entangling gates\r\n\t\t\t\t\t\t\tif (control.lower() == 'weights' and i == p and j == q):\r\n\t\t\t\t\t\t\t\tprog.inst(CPHASE(4*J[i, j] + (-1)**(sign)*pi/2, qubits[i], qubits[j]))\r\n\t\t\t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j] + (-1)**(sign)*pi/2, qubits[i]))\r\n\t\t\t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j] + (-1)**(sign)*pi/2, qubits[j]))\r\n\t\t\t\r\n\t\t\t\t\t\t\telif (control.lower() == 'neither' or 'bias' or 'gamma' and sign.lower() == 'neither'):\r\n\t\t\t\t\t\t\t\tprog.inst(CPHASE(4*J[i, j], qubits[i], qubits[j]))\r\n\t\t\t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j], qubits[i]))\r\n\t\t\t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j], qubits[j]))\t\t\r\n\r\n\t\t#Apply local Z rotations (b) to each qubit (with one phase changed by pi/2 if the corresponding parameter {r} is being updated\r\n\t\tfor j in range(0, N_qubits):\r\n\t\t\tif (control == 'BIAS' and j == r):\r\n\t\t\t\tprog.inst(PHASE(-2*b[j] +(-1)**(sign)*pi/2, qubits[j]))\r\n\t\t\telif (control== 'NEITHER' or 'WEIGHTS' or 'GAMMA' and sign == 'NEITHER'):\r\n\t\t\t\tprog.inst(PHASE(-2*b[j], \t\tqubits[j]))\r\n\t\t\t\t\r\n\t\t#Apply final 'measurement' layer to all qubits, either all Hadamard, or X or Y rotations\r\n\t\tif (circuit_choice == 'IQP'):\r\n\t\t\tprog = HadamardToAll(prog, qubits) \t#If the final 'measurement' layer is to be an IQP measurement (i.e. 
Hadamard on all qubits)\r\n\r\n\t\telif (circuit_choice =='QAOA'):\r\n\t\t\t#If the final 'measurement' layer is to be a QAOA measurement (i.e. e^(-i(pi/4)X_i)on all qubits)\r\n\t\t\tfor k in range(0, N_qubits):\r\n\t\t\t\t# if (control == 'GAMMA' and k == s):\r\n\t\t\t\t# \tprog.inst(pl.exponential_map(sX(k))(-float(gamma[k])+ (-1)**(sign)*pi/2))\r\n\t\r\n\t\t\t\t# elif (control == 'NEITHER' or 'WEIGHTS' or 'BIAS' and sign == 'NEITHER'):\r\n\t\t\t\tH_temp = (-float(gamma[k]))*pl.sX(qubits[k])\r\n\t\t\t\tprog.inst(pl.exponential_map(H_temp)(1.0))\r\n\t\t\t\t# print('GAMMA IS:',-float(gamma[k]))\r\n\t\telif (circuit_choice == 'IQPy' ):\r\n\t\t\t#If the final 'measurement' layer is to be a IQPy measurement (i.e. e^(-i(pi/4)Y_i) on all qubits)\r\n\t\t\tfor k in qubits:\r\n\t\t\t\tH_temp = (-float(delta[k]))*pl.sY(qubits[k])\r\n\t\t\t\tprog.inst(pl.exponential_map(H_temp)(1.0))\r\n\r\n\t\telse: raise ValueError(\"circuit_choice must be either \\\r\n\t\t\t\t\t\t\t\\'IQP\\' for IQP (Final Hadamard), \\\r\n\t\t\t\t\t\t\t\\'QAOA\\' for QAOA (Final X rotation) or \\\r\n\t\t\t\t\t\t\t\\'IQPy\\' IQPy (Final Y rotation)\")\r\n\t\t# print(prog)\r\n\t\t'''Insert explicit measure instruction if required'''\r\n\t\t# ro = prog.declare('ro', 'BIT', len(qubits))\r\n\t\t# prog.inst([MEASURE(qubit, ro[idx]) for idx, qubit in enumerate(qubits)])\r\n\r\n\t\treturn prog\r\n\r\nclass IsingBornMachine:\r\n\r\n\tdef __init__(self, qc, circuit_params, meas_choice):\r\n\r\n\t\tself.circuit = Program()\r\n\t\tself.qubits = qc.qubits()\r\n\t\t\r\n\t\tself._num_qubits = len(self.qubits)\r\n\t\tself._meas_choice = meas_choice\r\n\r\n\tdef _params(self, circuit_params):\r\n\t\t#Unpack circuit parameters from dictionary\r\n\t\tself.J = circuit_params['J']\r\n\t\tself.b = circuit_params['b']\r\n\t\tself.gamma = circuit_params['gamma']\r\n\t\tself.delta = circuit_params['delta']\r\n\r\n\tdef _hadamard_to_all(self):\r\n\t\t\"\"\"Adds Hadamard to all qubits in qubit list\"\"\"\r\n\t\tself.circuit = self.circuit + [H(qubit_index) for qubit_index in self.qubits]\r\n\t\treturn \r\n\r\n\t# for j in range(0, N_qubits):\r\n\t# \t\tfor i in range(0, N_qubits):\r\n\t# \t\t\t\tif (i < j): #connection is symmetric, so don't overcount entangling gates\r\n\t# \t\t\t\t\tif (control.lower() == 'weights' and i == p and j == q):\r\n\t# \t\t\t\t\t\tprog.inst(CPHASE(4*J[i, j] + (-1)**(sign)*pi/2, qubits[i], qubits[j]))\r\n\t# \t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j] + (-1)**(sign)*pi/2, qubits[i]))\r\n\t# \t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j] + (-1)**(sign)*pi/2, qubits[j]))\r\n\t\t\r\n\t# \t\t\t\t\telif (control.lower() == 'neither' or 'bias' or 'gamma' and sign.lower() == 'neither'):\r\n\t# \t\t\t\t\t\tprog.inst(CPHASE(4*J[i, j], qubits[i], qubits[j]))\r\n\t# \t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j], qubits[i]))\r\n\t# \t\t\t\t\t\tprog.inst(PHASE(-2*J[i, j], qubits[j]))\t\r\n\t\r\n\tdef _measurement_layer(self, meas_choice):\r\n\t\tself._meas_choice = meas_choice\r\n\t\t#Apply final 'measurement' layer to all qubits, either all Hadamard, or X or Y rotations\r\n\t\tif (self._meas_choice.lower() == 'iqp'):\r\n\t\t\tself.circuit = self.circuit + [H(qubit_index) for qubit_index in self.qubits] \r\n\r\n\t\t# elif (circuit_choice =='QAOA'):\r\n\t\t# \t#If the final 'measurement' layer is to be a QAOA measurement (i.e. 
e^(-i(pi/4)X_i)on all qubits)\r\n\t\t# \tfor k in range(0, N_qubits):\r\n\t\t# \t\t# if (control == 'GAMMA' and k == s):\r\n\t\t# \t\t# \tprog.inst(pl.exponential_map(sX(k))(-float(gamma[k])+ (-1)**(sign)*pi/2))\r\n\t\r\n\t\t# \t\t# elif (control == 'NEITHER' or 'WEIGHTS' or 'BIAS' and sign == 'NEITHER'):\r\n\t\t# \t\tH_temp = (-float(gamma[k]))*pl.sX(qubits[k])\r\n\t\t# \t\tself.circuit += pl.exponential_map(H_temp)(1.0)\r\n\t\t# \t\t# print('GAMMA IS:',-float(gamma[k]))\r\n\t\t# elif (circuit_choice == 'IQPy' ):\r\n\t\t# \t#If the final 'measurement' layer is to be a IQPy measurement (i.e. e^(-i(pi/4)Y_i) on all qubits)\r\n\t\t# \tfor k in qubits:\r\n\t\t# \t\tH_temp = (-float(delta[k]))*pl.sY(qubits[k])\r\n\t\t# \t\tprog.inst(pl.exponential_map(H_temp)(1.0))\r\n\r\n# device_name = '2q-qvm'\r\n# as_qvm_value = True\r\n# qc = get_qc(device_name, as_qvm = as_qvm_value) \r\n\r\n# params = NetworkParams(qc, 123342)\r\n# ibm = IsingBornMachine(qc, params)\r\n# ibm._hadamard_to_all()\r\n# print(ibm.circuit)\r\n# ibm._measurement_layer( 'IQP')\r\n# print(ibm.circuit)" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cccaaannn/useful_functions
[ "1570cda8c642a39f04ed9f22ebeeab2bfb9e9424" ]
[ "00-modules/data_science_visualization_modules/numpy_examples.py" ]
[ "import numpy as np\n\n\n# documentation\n# https://numpy.org/doc/1.18/reference/index.html\n\n\n# load data\ndata = np.loadtxt(\"data_science-visualization/example_Data/data.txt\")\ndata_str = np.loadtxt(\"data_science-visualization/example_Data/top50info.txt\", dtype=np.str, delimiter='\\t', skiprows=1)\n\n# array\narr = np.array([2, 4, 6, 8, 10])\n\n# arange\narr = np.arange(10)\narr = np.arange(2, 10)\n\n# reshape\narr = np.arange(9).reshape((3, 3))\n\n# random\narr = np.random.rand(10)\narr = np.random.randint(5, size=(5))\narr = np.random.randint(5, size=(2, 4))\narr = np.random.permutation(10)\narr = np.random.normal(size=10) # normal distribution\nnp.random.shuffle(arr)\nnp.random.seed(42)\n\n# where\narr = np.array([1,1,3,\n 2,1,2,\n 2,2,3]).reshape((3, 3))\nindexes = np.where(arr==3)\narr = arr[np.where(arr==3)[0]] # colum location\n\n\n# select colum\narr = np.array([1,1,3,\n 2,1,2,\n 2,2,3]).reshape((3, 3))\narr = arr[:,0]\n\n# select row\narr = np.array([1,1,3,\n 2,1,2,\n 2,2,3]).reshape((3, 3))\narr = arr[0,:]\narr = arr[0]\n\n\n# sort\narr = np.random.rand(30).reshape((3, 10))\narr2 = np.sort(arr,axis=0) # sort all columns\narr2 = np.sort(arr,axis=1) # sort all rows\n\n# sort by using ONLY first column and not change the order\narr2 = arr[arr[:,0].argsort()] \n# sort by using ONLY first row and not change the order\narr2 = arr[:,arr[0].argsort()] \n\n# argsort (sync sort 2 arrays)\narr = np.array([5, 3, 1, 2, 4])\narr2 = np.array([\"e\", \"c\", \"a\", \"b\", \"d\"])\n\nsorted_indexes = arr.argsort()\narr = arr[sorted_indexes]\narr2 = arr2[sorted_indexes]\n\n\n# concatenate\narr = np.arange(10)\narr2 = np.arange(10,20)\n\narr3 = np.concatenate((arr, arr2), axis=0)\n\narr = np.arange(10).reshape((5, 2))\narr2 = np.arange(10,20).reshape((5, 2))\n\narr3 = np.concatenate((arr, arr2), axis=None) # merge without dimension\narr3 = np.concatenate((arr, arr2), axis=0) # merge colum by colum\narr3 = np.concatenate((arr, arr2), axis=1) # merge row by row\n\n\n" ]
[ [ "numpy.random.seed", "numpy.arange", "numpy.random.shuffle", "numpy.sort", "numpy.concatenate", "numpy.random.normal", "numpy.random.permutation", "numpy.random.rand", "numpy.array", "numpy.where", "numpy.loadtxt", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ishinomori/PlasmaPy
[ "46a77797ff64d61d4215f5b686687359f892d9dd" ]
[ "plasmapy/formulary/parameters.py" ]
[ "\"\"\"\nThis module gathers basic and general plasma parameters such as the\nplasma frequency or Debye length.\n\"\"\"\n__all__ = [\n \"mass_density\",\n \"Alfven_speed\",\n \"ion_sound_speed\",\n \"thermal_speed\",\n \"thermal_pressure\",\n \"kappa_thermal_speed\",\n \"Hall_parameter\",\n \"gyrofrequency\",\n \"gyroradius\",\n \"plasma_frequency\",\n \"Debye_length\",\n \"Debye_number\",\n \"inertial_length\",\n \"magnetic_pressure\",\n \"magnetic_energy_density\",\n \"upper_hybrid_frequency\",\n \"lower_hybrid_frequency\",\n]\n\nimport numbers\nimport numpy as np\nimport warnings\n\nfrom astropy import units as u\nfrom astropy.constants.si import (m_p, m_e, c, mu0, k_B, e, eps0)\nfrom plasmapy import atomic\nfrom plasmapy.utils import PhysicsError\nfrom plasmapy.utils.decorators import (angular_freq_to_hz, check_relativistic, validate_quantities)\nfrom plasmapy.utils.exceptions import PhysicsWarning\nfrom typing import Optional\n\n\ndef _grab_charge(ion, z_mean=None):\n \"\"\"Utility function to merge two possible inputs for particle charge.\n\n Parameters\n ----------\n ion : str or `plasmapy.atomic.Particle`\n a string representing a charged particle, or a Particle object.\n\n z_mean : float\n An optional float describing the average ionization of a particle\n species.\n\n Returns\n -------\n float\n if `z_mean` was passed, `z_mean`, otherwise, the integer charge\n of the `ion`.\n\n \"\"\"\n if z_mean is None:\n # warnings.warn(\"No z_mean given, defaulting to atomic charge\",\n # PhysicsWarning)\n Z = atomic.integer_charge(ion)\n else:\n # using average ionization provided by user\n Z = z_mean\n return Z\n\n\n@validate_quantities(density={'can_be_negative': False},\n validations_on_return={'can_be_negative': False})\ndef mass_density(density: [u.m ** -3, u.kg / (u.m ** 3)],\n particle: Optional[str] = None,\n z_mean: Optional[numbers.Real] = None) -> u.kg / u.m ** 3:\n \"\"\"Utility function to merge two possible inputs for particle charge.\n\n Parameters\n ----------\n density : ~astropy.units.Quantity\n Either a particle density (number of particles per unit volume, in units\n of 1/m^3) or a mass density (in units of kg/m^3 or equivalent).\n\n particle : str, optional\n Representation of the particle species (e.g., `'p'` for protons, `'D+'`\n for deuterium, or `'He-4 +1'` for singly ionized helium-4),\n which defaults to electrons. 
If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n z_mean : float\n An optional float describing the average ionization of a particle\n species.\n\n Raises\n ------\n ValueError\n If the `density` has units inconvertible to either a particle density\n or a mass density, or if you pass in a number density without a particle.\n\n Returns\n -------\n ~astropy.units.Quantity\n The mass density calculated from all the provided sources of information.\n\n Examples\n -------\n >>> from astropy import units as u\n >>> mass_density(1 * u.m ** -3,'p')\n <Quantity 1.67353...e-27 kg / m3>\n >>> mass_density(4 * u.m ** -3,'D+')\n <Quantity 1.33779...e-26 kg / m3>\n\n \"\"\"\n # validate_quantities ensures we have units of u.kg/u.m**3 or 1/u.m**3\n rho = density\n if not rho.unit.is_equivalent(u.kg / u.m ** 3):\n if particle:\n m_i = atomic.particle_mass(particle)\n Z = _grab_charge(particle, z_mean)\n rho = density * m_i + Z * density * m_e\n else:\n raise ValueError(f\"If passing a number density, you must pass a \"\n f\"particle (not {particle}) to calculate the mass density!\")\n\n return rho\n\n\n@check_relativistic\n@validate_quantities(density={'can_be_negative': False})\ndef Alfven_speed(B: u.T,\n density: [u.m ** -3, u.kg / u.m ** 3],\n ion=\"p+\",\n z_mean=None) -> u.m / u.s:\n r\"\"\"\n Return the Alfvén speed.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field magnitude in units convertible to tesla.\n\n density : ~astropy.units.Quantity\n Either the ion number density in units convertible to 1 / m**3,\n or the mass density in units convertible to kg / m**3.\n\n ion : str, optional\n Representation of the ion species (e.g., `'p'` for protons,\n `'D+'` for deuterium, or `'He-4 +1'` for singly ionized\n helium-4), which defaults to protons. If no charge state\n information is provided, then the ions are assumed to be\n singly charged.\n\n z_mean : ~astropy.units.Quantity, optional\n The average ionization (arithmetic mean) for a plasma where the\n a macroscopic description is valid. If this quantity is not\n given then the atomic charge state (integer) of the ion\n is used. This is effectively an average Alfven speed for the\n plasma where multiple charge states are present.\n\n Returns\n -------\n V_A : ~astropy.units.Quantity with units of speed\n The Alfvén speed of the plasma in units of meters per second.\n\n Raises\n ------\n TypeError\n The magnetic field and density arguments are not instances of\n `~astropy.units.Quantity` and cannot be converted into those.\n\n ~astropy.units.UnitConversionError\n If the magnetic field or density is not in appropriate units.\n\n ~plasmapy.utils.RelativityError\n If the Alfven velocity is greater than or equal to the speed of light\n\n ValueError\n If the density is negative, or the ion mass or charge state\n cannot be found.\n\n Warns\n -----\n ~plasmapy.utils.RelativityWarning\n If the Alfven velocity exceeds 5% of the speed of light\n\n ~astropy.units.UnitsWarning\n if units are not provided, SI units are assumed.\n\n Notes\n -----\n The Alfven speed :math:`V_A` is the typical propagation speed\n of magnetic disturbances in a plasma, and is given by:\n\n .. 
math::\n\n V_A = \\frac{B}{\\sqrt{\\mu_0\\rho}}\n\n where the mass density is :math:`\\rho = n_i m_i + n_e m_e`.\n\n This expression does not account for relativistic effects, and\n loses validity when the resulting speed is a significant fraction\n of the speed of light.\n\n Examples\n --------\n >>> from astropy import units as u\n >>> from astropy.constants.si import m_p, m_e\n >>> B = 0.014*u.T\n >>> n = 5e19*u.m**-3\n >>> rho = n*(m_p+m_e)\n >>> ion = 'p'\n >>> Alfven_speed(B, n, ion)\n <Quantity 43173.870... m / s>\n >>> Alfven_speed(B, rho, ion)\n <Quantity 43173.870... m / s>\n >>> Alfven_speed(B, rho, ion).to(u.cm/u.us)\n <Quantity 4.31738... cm / us>\n\n \"\"\"\n rho = mass_density(density, ion, z_mean)\n\n V_A = (np.abs(B) / np.sqrt(mu0 * rho))\n return V_A\n\n\n@check_relativistic\n@validate_quantities(T_i={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n T_e={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n n_e={'can_be_negative': False,\n 'none_shall_pass': True},\n k={'can_be_negative': False,\n 'none_shall_pass': True})\ndef ion_sound_speed(T_e: u.K,\n T_i: u.K,\n n_e: u.m ** -3 = None,\n k: u.m ** -1 = None,\n gamma_e=1,\n gamma_i=3,\n ion='p+',\n z_mean=None) -> u.m / u.s:\n r\"\"\"\n Return the ion sound speed for an electron-ion plasma.\n\n Parameters\n ----------\n T_e : ~astropy.units.Quantity\n Electron temperature in units of temperature or energy per\n particle. If this is not given, then the electron temperature\n is assumed to be zero.\n\n T_i : ~astropy.units.Quantity\n Ion temperature in units of temperature or energy per\n particle. If this is not given, then the ion temperature is\n assumed to be zero.\n\n n_e : ~astropy.units.Quantity\n Electron number density. If this is not given, then ion_sound_speed\n will be approximated in the non-dispersive limit\n (:math:`k^2 \\lambda_{D}^2` will be assumed zero). If n_e is given,\n a value for k must also be given.\n\n k : ~astropy.units.Quantity\n Wavenumber (in units of inverse length, e.g. per meter). If this\n is not given, then ion_sound_speed will be approximated in the\n non-dispersive limit (:math:`k^2 \\lambda_{D}^2` will be assumed zero).\n If k is given, a value for n_e must also be given.\n\n gamma_e : float or int\n The adiabatic index for electrons, which defaults to 1. This\n value assumes that the electrons are able to equalize their\n temperature rapidly enough that the electrons are effectively\n isothermal.\n\n gamma_i : float or int\n The adiabatic index for ions, which defaults to 3. This value\n assumes that ion motion has only one degree of freedom, namely\n along magnetic field lines.\n\n ion : str, optional\n Representation of the ion species (e.g., `'p'` for protons,\n `'D+'` for deuterium, or 'He-4 +1' for singly ionized\n helium-4), which defaults to protons. If no charge state\n information is provided, then the ions are assumed to be\n singly charged.\n\n z_mean : ~astropy.units.Quantity, optional\n The average ionization (arithmetic mean) for a plasma where the\n a macroscopic description is valid. If this quantity is not\n given then the atomic charge state (integer) of the ion\n is used. 
This is effectively an average ion sound speed for the\n plasma where multiple charge states are present.\n\n Returns\n -------\n V_S : ~astropy.units.Quantity\n The ion sound speed in units of meters per second.\n\n Raises\n ------\n TypeError\n If any of the arguments are not entered as keyword arguments\n or are of an incorrect type.\n\n ValueError\n If the ion mass, adiabatic index, or temperature are invalid.\n\n ~plasmapy.utils.PhysicsError\n If an adiabatic index is less than one.\n\n ~astropy.units.UnitConversionError\n If the temperature, electron number density, or wavenumber\n is in incorrect units.\n\n Warns\n -----\n RelativityWarning\n If the ion sound speed exceeds 5% of the speed of light.\n\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed.\n\n PhysicsWarning\n If only one of (k, n_e) is given, the non-dispersive limit\n is assumed.\n\n Notes\n -----\n The ion sound speed :math:`V_S` is given by\n\n .. math::\n\n V_S = \\sqrt{\\frac{\\gamma_e Z k_B T_e + \\gamma_i k_B T_i}{m_i (1 + k^2 \\lambda_{D}^2)}}\n\n where :math:`\\gamma_e` and :math:`\\gamma_i` are the electron and\n ion adiabatic indices, :math:`k_B` is the Boltzmann constant,\n :math:`T_e` and :math:`T_i` are the electron and ion temperatures,\n :math:`Z` is the charge state of the ion, :math:`m_i` is the\n ion mass, :math:`\\lambda_{D}` is the Debye length, and :math:`k` is the\n wavenumber.\n\n In the non-dispersive limit (:math:`k^2 \\lambda_{D}^2` is small) the\n equation for :math:`V_S` is approximated (the denominator reduces\n to :math:`m_i`).\n\n When the electron temperature is much greater than the ion\n temperature, the ion sound velocity reduces to\n :math:`\\sqrt{\\gamma_e k_B T_e / m_i}`. Ion acoustic waves can\n therefore occur even when the ion temperature is zero.\n\n Example\n -------\n >>> from astropy import units as u\n >>> n = 5e19*u.m**-3\n >>> k_1 = 3e1*u.m**-1\n >>> k_2 = 3e7*u.m**-1\n >>> ion_sound_speed(T_e=5e6*u.K, T_i=0*u.K, ion='p', gamma_e=1, gamma_i=3)\n <Quantity 203155... m / s>\n >>> ion_sound_speed(T_e=5e6*u.K, T_i=0*u.K, n_e=n, k=k_1, ion='p', gamma_e=1, gamma_i=3)\n <Quantity 203155... m / s>\n >>> ion_sound_speed(T_e=5e6*u.K, T_i=0*u.K, n_e=n, k=k_2, ion='p', gamma_e=1, gamma_i=3)\n <Quantity 310.31... m / s>\n >>> ion_sound_speed(T_e=5e6*u.K, T_i=0*u.K, n_e=n, k=k_1)\n <Quantity 203155... m / s>\n >>> ion_sound_speed(T_e=500*u.eV, T_i=200*u.eV, n_e=n, k=k_1, ion='D+')\n <Quantity 229585... m / s>\n\n \"\"\"\n\n m_i = atomic.particle_mass(ion)\n Z = _grab_charge(ion, z_mean)\n\n for gamma, particles in zip([gamma_e, gamma_i], [\"electrons\", \"ions\"]):\n if not isinstance(gamma, (numbers.Real, numbers.Integral)):\n raise TypeError(f\"The adiabatic index gamma for {particles} must be \"\n \"a float or int\")\n if gamma < 1:\n raise PhysicsError(f\"The adiabatic index for {particles} must be between \"\n f\"one and infinity\")\n\n # Assume non-dispersive limit if values for n_e (or k) are not specified\n klD2 = 0.0\n if (n_e is None) ^ (k is None):\n warnings.warn(\"The non-dispersive limit has been assumed for \"\n \"this calculation. 
To prevent this, values must \"\n \"be specified for both n_e and k.\", PhysicsWarning)\n elif n_e is not None and k is not None:\n lambda_D = Debye_length(T_e, n_e)\n klD2 = (k * lambda_D) ** 2\n\n try:\n V_S_squared = (gamma_e * Z * k_B * T_e + gamma_i * k_B * T_i) / (m_i * (1 + klD2))\n V_S = np.sqrt(V_S_squared).to(u.m / u.s)\n except Exception:\n raise ValueError(\"Unable to find ion sound speed.\")\n\n return V_S\n\n\n@check_relativistic\n@validate_quantities(T={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n mass={'can_be_negative': False, 'can_be_nan': True})\[email protected]_input\ndef thermal_speed(T: u.K,\n particle: atomic.Particle = \"e-\",\n method=\"most_probable\",\n mass: u.kg = np.nan * u.kg) -> u.m / u.s:\n r\"\"\"\n Return the most probable speed for a particle within a Maxwellian\n distribution.\n\n Parameters\n ----------\n T : ~astropy.units.Quantity\n The particle temperature in either kelvin or energy per particle\n\n particle : str, optional\n Representation of the particle species (e.g., `'p'` for protons, `'D+'`\n for deuterium, or `'He-4 +1'` for singly ionized helium-4),\n which defaults to electrons. If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n method : str, optional\n Method to be used for calculating the thermal speed. Options are\n `'most_probable'` (default), `'rms'`, and `'mean_magnitude'`.\n\n mass : ~astropy.units.Quantity\n The particle's mass override. Defaults to NaN and if so, doesn't do\n anything, but if set, overrides mass acquired from `particle`. Useful\n with relative velocities of particles.\n\n Returns\n -------\n V : ~astropy.units.Quantity\n particle thermal speed\n\n Raises\n ------\n TypeError\n The particle temperature is not a ~astropy.units.Quantity\n\n ~astropy.units.UnitConversionError\n If the particle temperature is not in units of temperature or\n energy per particle\n\n ValueError\n The particle temperature is invalid or particle cannot be used to\n identify an isotope or particle\n\n Warns\n -----\n RelativityWarning\n If the ion sound speed exceeds 5% of the speed of light, or\n\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed.\n\n Notes\n -----\n The particle thermal speed is given by:\n\n .. math::\n V_{th,i} = \\sqrt{\\frac{2 k_B T_i}{m_i}}\n\n This function yields the most probable speed within a distribution\n function. However, the definition of thermal velocity varies by\n the square root of two depending on whether or not this velocity\n absorbs that factor in the expression for a Maxwellian\n distribution. In particular, the expression given in the NRL\n Plasma Formulary [1] is a square root of two smaller than the\n result from this function.\n\n Examples\n --------\n >>> from astropy import units as u\n >>> thermal_speed(5*u.eV, 'p')\n <Quantity 30949.6... m / s>\n >>> thermal_speed(1e6*u.K, particle='p')\n <Quantity 128486... m / s>\n >>> thermal_speed(5*u.eV, particle='e-')\n <Quantity 132620... m / s>\n >>> thermal_speed(1e6*u.K, particle='e-')\n <Quantity 550569... m / s>\n >>> thermal_speed(1e6*u.K, method=\"rms\")\n <Quantity 674307... m / s>\n >>> thermal_speed(1e6*u.K, method=\"mean_magnitude\")\n <Quantity 621251... 
m / s>\n\n \"\"\"\n m = mass if np.isfinite(mass) else atomic.particle_mass(particle)\n\n # different methods, as per https://en.wikipedia.org/wiki/Thermal_velocity\n if method == \"most_probable\":\n V = (np.sqrt(2 * k_B * T / m))\n elif method == \"rms\":\n V = (np.sqrt(3 * k_B * T / m))\n elif method == \"mean_magnitude\":\n V = (np.sqrt(8 * k_B * T / (m * np.pi)))\n else:\n raise ValueError(\"Method {method} not supported in thermal_speed\")\n\n return V\n\n\n@validate_quantities(T={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n n={'can_be_negative': False})\ndef thermal_pressure(T: u.K, n: u.m ** -3) -> u.Pa:\n r\"\"\"\n Return the thermal pressure for a Maxwellian distribution.\n\n Parameters\n ----------\n T : ~astropy.units.Quantity\n The particle temperature in either kelvin or energy per particle\n\n n : ~astropy.units.Quantity\n The particle number density in units convertible to m**-3.\n\n Examples\n --------\n >>> import astropy.units as u\n >>> thermal_pressure(1*u.eV, 1e20/u.m**3)\n <Quantity 16.021... Pa>\n >>> thermal_pressure(10*u.eV, 1e20/u.m**3)\n <Quantity 160.21... Pa>\n\n Returns\n -------\n p_th : ~astropy.units.Quantity\n Thermal pressure.\n\n Raises\n ------\n TypeError\n The temperature or number density is not a `~astropy.units.Quantity`.\n\n ~astropy.units.UnitConversionError\n If the particle temperature is not in units of temperature or\n energy per particle.\n\n Notes\n -----\n The thermal pressure is given by:\n\n .. math::\n T_{th} = nk_{B}T\n \"\"\"\n\n return n * k_B * T\n\n\n@check_relativistic\n@validate_quantities(T={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()})\ndef kappa_thermal_speed(T: u.K, kappa, particle=\"e-\", method=\"most_probable\") -> u.m / u.s:\n r\"\"\"Return the most probable speed for a particle within a Kappa\n distribution.\n\n Parameters\n ----------\n T : ~astropy.units.Quantity\n The particle temperature in either kelvin or energy per particle\n\n kappa: float\n The kappa parameter is a dimensionless number which sets the slope\n of the energy spectrum of suprathermal particles forming the tail\n of the Kappa velocity distribution function. Kappa must be greater\n than 3/2.\n\n particle : str, optional\n Representation of the particle species (e.g., 'p' for protons, 'D+'\n for deuterium, or 'He-4 +1' for singly ionized helium-4),\n which defaults to electrons. If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n method : str, optional\n Method to be used for calculating the thermal speed. Options are\n 'most_probable' (default), 'rms', and 'mean_magnitude'.\n\n Returns\n -------\n V : ~astropy.units.Quantity\n Particle thermal speed\n\n Raises\n ------\n TypeError\n The particle temperature is not a ~astropy.units.Quantity.\n\n astropy.units.UnitConversionError\n If the particle temperature is not in units of temperature or\n energy per particle.\n\n ValueError\n The particle temperature is invalid or particle cannot be used to\n identify an isotope or particle.\n\n Warns\n -----\n RelativityWarning\n If the particle thermal speed exceeds 5% of the speed of light, or\n\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed.\n\n Notes\n -----\n The particle thermal speed is given by:\n\n .. 
math::\n V_{th,i} = \\sqrt{(2 \\kappa - 3)\\frac{2 k_B T_i}{\\kappa m_i}}\n\n For more discussion on the mean_magnitude calculation method, see [1]_.\n\n\n Examples\n --------\n >>> from astropy import units as u\n >>> kappa_thermal_speed(5*u.eV, 4, 'p') # defaults to most probable\n <Quantity 24467.87... m / s>\n >>> kappa_thermal_speed(5*u.eV, 4, 'p', 'rms')\n <Quantity 37905.47... m / s>\n >>> kappa_thermal_speed(5*u.eV, 4, 'p', 'mean_magnitude')\n <Quantity 34922.98... m / s>\n\n References\n ----------\n .. [1] PlasmaPy Issue #186, https://github.com/PlasmaPy/PlasmaPy/issues/186\n\n See Also\n --------\n plasmapy.formulary.kappa_thermal_speed\n plasmapy.formulary.kappa_velocity_1D\n \"\"\"\n # Checking thermal units\n if kappa <= 3 / 2:\n raise ValueError(f\"Must have kappa > 3/2, instead of {kappa}, for \"\n \"kappa distribution function to be valid.\")\n # different methods, as per https://en.wikipedia.org/wiki/Thermal_velocity\n vTh = thermal_speed(T=T,\n particle=particle,\n method=method)\n\n if method == \"most_probable\":\n # thermal velocity of Kappa distribution function is just Maxwellian\n # thermal speed modulated by the following factor.\n # This is only true for \"most probable\" case. RMS and mean\n # magnitude velocities are same as Maxwellian.\n coeff = np.sqrt((kappa - 3 / 2) / kappa)\n else:\n coeff = 1\n\n return vTh * coeff\n\n\n@validate_quantities(n={'can_be_negative': False},\n T={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()})\ndef Hall_parameter(n: u.m ** -3,\n T: u.K,\n B: u.T,\n ion_particle,\n particle='e-',\n coulomb_log=None,\n V=None,\n coulomb_log_method=\"classical\"):\n r\"\"\"Calculate the ratio between the particle gyrofrequency and the\n particle-ion particle collision rate.\n\n All parameters apply to `particle`.\n\n Parameters\n ----------\n n : ~astropy.units.quantity.Quantity\n The density of particle s\n T : ~astropy.units.quantity.Quantity\n The temperature of particles\n B : ~astropy.units.quantity.Quantity\n The magnetic field\n ion_particle : str\n String signifying the type of ion.\n particle : str, optional\n String signifying the type of particles. Defaults to electrons.\n coulomb_log : float, optional\n Preset value for the Coulomb logarithm. Used mostly for testing purposes.\n V : ~astropy.units.quantity.Quantity\n The relative velocity between `particle` and ion particles.\n coulomb_log_method : str, optional\n Method used for Coulomb logarithm calculation. 
Refer to its documentation.\n\n See Also\n --------\n plasmapy.formulary.parameters.gyrofrequency\n plasmapy.formulary.parameters.fundamental_electron_collision_freq\n plasmapy.formulary.collisions.Coulomb_logarithm\n\n Returns\n -------\n astropy.units.quantity.Quantity\n\n Examples\n --------\n >>> from astropy import units as u\n >>> Hall_parameter(1e10 * u.m**-3, 2.8e3 * u.eV, 2.3 * u.T, 'He-4 +1')\n <Quantity 7.26446...e+16>\n >>> Hall_parameter(1e10 * u.m**-3, 5.8e3 * u.eV, 2.3 * u.T, 'He-4 +1')\n <Quantity 2.11158...e+17>\n\n \"\"\"\n from plasmapy.formulary.collisions import (fundamental_ion_collision_freq,\n fundamental_electron_collision_freq)\n gyro_frequency = gyrofrequency(B, particle)\n gyro_frequency = gyro_frequency / u.radian\n if atomic.Particle(particle).particle == 'e-':\n coll_rate = fundamental_electron_collision_freq(T,\n n,\n ion_particle,\n coulomb_log,\n V,\n coulomb_log_method=coulomb_log_method)\n else:\n coll_rate = fundamental_ion_collision_freq(T, n, ion_particle, coulomb_log, V)\n return gyro_frequency / coll_rate\n\n\n@validate_quantities(validations_on_return={'units': [u.rad / u.s, u.Hz],\n 'equivalencies': [(u.cy / u.s, u.Hz)]})\n@angular_freq_to_hz\ndef gyrofrequency(B: u.T, particle='e-', signed=False, Z=None) -> u.rad / u.s:\n r\"\"\"Calculate the particle gyrofrequency in units of radians per second.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field magnitude in units convertible to tesla.\n\n particle : str, optional\n Representation of the particle species (e.g., 'p' for protons, 'D+'\n for deuterium, or 'He-4 +1' for singly ionized helium-4),\n which defaults to electrons. If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n signed : bool, optional\n The gyrofrequency can be defined as signed (negative for electron,\n positive for ion). Default is `False` (unsigned, i.e. always\n positive).\n\n Z : float or ~astropy.units.Quantity, optional\n The average ionization (arithmetic mean) for a plasma where the\n a macroscopic description is valid. If this quantity is not\n given then the atomic charge state (integer) of the ion\n is used. This is effectively an average gyrofrequency for the\n plasma where multiple charge states are present, and should\n not be interpreted as the gyrofrequency for any single particle.\n If not provided, it defaults to the integer charge of the `particle`.\n\n Returns\n -------\n omega_c : ~astropy.units.Quantity\n The particle gyrofrequency in units of radians per second\n\n Raises\n ------\n TypeError\n If the magnetic field is not a `Quantity` or particle is not of an\n appropriate type\n\n ValueError\n If the magnetic field contains invalid values or particle cannot be\n used to identify an particle or isotope\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The particle gyrofrequency is the angular frequency of particle gyration\n around magnetic field lines and is given by:\n\n .. math::\n \\omega_{ci} = \\frac{Z e B}{m_i}\n\n The particle gyrofrequency is also known as the particle cyclotron\n frequency or the particle Larmor frequency.\n\n The recommended way to convert from angular frequency to frequency\n is to use an equivalency between cycles per second and Hertz, as\n Astropy's `dimensionles_angles` equivalency does not account for\n the factor of 2*pi needed during this conversion. 
The\n `dimensionless_angles` equivalency is appropriate when dividing a\n velocity by an angular frequency to get a length scale.\n\n Examples\n --------\n >>> from astropy import units as u\n >>> gyrofrequency(0.1*u.T)\n <Quantity 1.7588...e+10 rad / s>\n >>> gyrofrequency(0.1*u.T, to_hz=True)\n <Quantity 2.79924...e+09 Hz>\n >>> gyrofrequency(0.1*u.T, signed=True)\n <Quantity -1.75882...e+10 rad / s>\n >>> gyrofrequency(0.01*u.T, 'p')\n <Quantity 957883.32... rad / s>\n >>> gyrofrequency(0.01*u.T, 'p', signed=True)\n <Quantity 957883.32... rad / s>\n >>> gyrofrequency(0.01*u.T, particle='T+')\n <Quantity 319964.5... rad / s>\n >>> gyrofrequency(0.01*u.T, particle='T+', to_hz=True)\n <Quantity 50923.9... Hz>\n >>> omega_ce = gyrofrequency(0.1*u.T)\n >>> print(omega_ce)\n 1758820... rad / s\n >>> f_ce = omega_ce.to(u.Hz, equivalencies=[(u.cy/u.s, u.Hz)])\n >>> print(f_ce)\n 279924... Hz\n\n \"\"\"\n m_i = atomic.particle_mass(particle)\n Z = _grab_charge(particle, Z)\n if not signed:\n Z = abs(Z)\n\n omega_ci = u.rad * (Z * e * np.abs(B) / m_i).to(1 / u.s)\n\n return omega_ci\n\n\n@validate_quantities(Vperp={'can_be_nan': True},\n T_i={'can_be_nan': True,\n 'equivalencies': u.temperature_energy()},\n validations_on_return={'equivalencies': u.dimensionless_angles()})\ndef gyroradius(B: u.T,\n particle='e-',\n *,\n Vperp: u.m / u.s = np.nan * u.m / u.s,\n T_i: u.K = np.nan * u.K) -> u.m:\n r\"\"\"Return the particle gyroradius.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field magnitude in units convertible to tesla.\n\n particle : str, optional\n Representation of the particle species (e.g., `'p'` for protons, `'D+'`\n for deuterium, or `'He-4 +1'` for singly ionized helium-4),\n which defaults to electrons. If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n Vperp : ~astropy.units.Quantity, optional\n The component of particle velocity that is perpendicular to the\n magnetic field in units convertible to meters per second.\n Must be input as a keyword argument.\n\n T_i : ~astropy.units.Quantity, optional\n The particle temperature in units convertible to kelvin.\n Must be input as a keyword argument.\n\n Returns\n -------\n r_Li : ~astropy.units.Quantity\n The particle gyroradius in units of meters. This\n ~astropy.units.Quantity will be based on either the\n perpendicular component of particle velocity as inputted, or\n the most probable speed for an particle within a Maxwellian\n distribution for the particle temperature.\n\n Raises\n ------\n TypeError\n The arguments are of an incorrect type\n\n ~astropy.units.UnitConversionError\n The arguments do not have appropriate units\n\n ValueError\n If any argument contains invalid values\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n One but not both of `Vperp` and `T_i` must be inputted.\n\n If any of `B`, `Vperp`, or `T_i` is a number rather than a\n `~astropy.units.Quantity`, then SI units will be assumed and a\n warning will be raised.\n\n The particle gyroradius is also known as the particle Larmor\n radius and is given by\n\n .. math::\n r_{Li} = \\frac{V_{\\perp}}{omega_{ci}}\n\n where :math:`V_{\\perp}` is the component of particle velocity that is\n perpendicular to the magnetic field and :math:`\\omega_{ci}` is the\n particle gyrofrequency. 
If a temperature is provided, then\n :math:`V_\\perp` will be the most probable thermal velocity of an\n particle at that temperature.\n\n Examples\n --------\n >>> from astropy import units as u\n >>> gyroradius(0.2*u.T,particle='p+',T_i=1e5*u.K)\n <Quantity 0.002120... m>\n >>> gyroradius(0.2*u.T,particle='p+',T_i=1e5*u.K)\n <Quantity 0.002120... m>\n >>> gyroradius(5*u.uG,particle='alpha',T_i=1*u.eV)\n <Quantity 288002.38... m>\n >>> gyroradius(400*u.G,particle='Fe+++',Vperp=1e7*u.m/u.s)\n <Quantity 48.23129... m>\n >>> gyroradius(B=0.01*u.T,T_i=1e6*u.K)\n <Quantity 0.003130... m>\n >>> gyroradius(B=0.01*u.T,Vperp=1e6*u.m/u.s)\n <Quantity 0.000568... m>\n >>> gyroradius(0.2*u.T,T_i=1e5*u.K)\n <Quantity 4.94949...e-05 m>\n >>> gyroradius(5*u.uG,T_i=1*u.eV)\n <Quantity 6744.25... m>\n >>> gyroradius(400*u.G,Vperp=1e7*u.m/u.s)\n <Quantity 0.001421... m>\n\n \"\"\"\n\n isfinite_Ti = np.isfinite(T_i)\n isfinite_Vperp = np.isfinite(Vperp)\n\n # check 1: ensure either Vperp or T_i invalid, keeping in mind that\n # the underlying values of the astropy quantity may be numpy arrays\n if np.any(np.logical_not(np.logical_xor(isfinite_Vperp, isfinite_Ti))):\n raise ValueError(\"Must give Vperp or T_i, but not both, as arguments to gyroradius\")\n\n # check 2: get Vperp as the thermal speed if is not already a valid input\n if np.isscalar(Vperp.value) and np.isscalar(T_i.value): # both T_i and Vperp are scalars\n # we know exactly one of them is nan from check 1\n if isfinite_Ti:\n # T_i is valid, so use it to determine Vperp\n Vperp = thermal_speed(T_i, particle=particle)\n # else: Vperp is alread valid, do nothing\n elif np.isscalar(Vperp.value): # only T_i is an array\n # this means either Vperp must be nan, or T_i must be array of all nan,\n # or else we couldn't have gotten through check 1\n if isfinite_Vperp:\n # Vperp is valid, T_i is a vector that is all nan\n # uh...\n Vperp = np.repeat(Vperp, len(T_i))\n else:\n # normal case where Vperp is scalar nan and T_i is valid array\n Vperp = thermal_speed(T_i, particle=particle)\n elif np.isscalar(T_i.value): # only Vperp is an array\n # this means either T_i must be nan, or V_perp must be array of all nan,\n # or else we couldn't have gotten through check 1\n if isfinite_Ti:\n # T_i is valid, V_perp is an array of all nan\n # uh...\n Vperp = thermal_speed(np.repeat(T_i, len(Vperp)), particle=particle)\n # else: normal case where T_i is scalar nan and Vperp is already a valid array\n # so, do nothing\n else: # both T_i and Vperp are arrays\n # we know all the elementwise combinations have one nan and one finite, due to check 1\n # use the valid Vperps, and replace the others with those calculated from T_i\n Vperp = Vperp.copy() # avoid changing Vperp's value outside function\n Vperp[isfinite_Ti] = thermal_speed(T_i[isfinite_Ti], particle=particle)\n\n omega_ci = gyrofrequency(B, particle)\n\n r_Li = np.abs(Vperp) / omega_ci\n\n return r_Li\n\n\n@validate_quantities(n={'can_be_negative': False},\n validations_on_return={'units': [u.rad / u.s, u.Hz],\n 'equivalencies': [(u.cy / u.s, u.Hz)]})\n@angular_freq_to_hz\ndef plasma_frequency(n: u.m**-3, particle='e-', z_mean=None) -> u.rad / u.s:\n r\"\"\"Calculate the particle plasma frequency.\n\n Parameters\n ----------\n n : ~astropy.units.Quantity\n Particle number density in units convertible to per cubic meter\n\n particle : str, optional\n Representation of the particle species (e.g., 'p' for protons, 'D+'\n for deuterium, or 'He-4 +1' for singly ionized helium-4),\n which defaults to electrons. 
If no charge state information is\n provided, then the particles are assumed to be singly charged.\n\n z_mean : ~astropy.units.Quantity, optional\n The average ionization (arithmetic mean) for a plasma where the\n a macroscopic description is valid. If this quantity is not\n given then the atomic charge state (`int`) of the ion\n is used. This is effectively an average plasma frequency for the\n plasma where multiple charge states are present.\n\n Returns\n -------\n omega_p : ~astropy.units.Quantity\n The particle plasma frequency in radians per second.\n\n Raises\n ------\n TypeError\n If n_i is not a `~astropy.units.Quantity` or particle is not of\n an appropriate type.\n\n UnitConversionError\n If `n_i` is not in correct units\n\n ValueError\n If `n_i` contains invalid values or particle cannot be used to\n identify an particle or isotope.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The particle plasma frequency is\n\n .. math::\n \\omega_{pi} = Z e \\sqrt{\\frac{n_i}{\\epsilon_0 m_i}}\n\n At present, astropy.units does not allow direct conversions from\n radians/second for angular frequency to 1/second or Hz for\n frequency. The dimensionless_angles equivalency allows that\n conversion, but does not account for the factor of 2*pi. The\n alternatives are to convert to cycle/second or to do the\n conversion manually, as shown in the examples.\n\n Example\n -------\n >>> from astropy import units as u\n >>> plasma_frequency(1e19*u.m**-3, particle='p')\n <Quantity 4.16329...e+09 rad / s>\n >>> plasma_frequency(1e19*u.m**-3, particle='p', to_hz=True)\n <Quantity 6.62608...e+08 Hz>\n >>> plasma_frequency(1e19*u.m**-3, particle='D+')\n <Quantity 2.94462...e+09 rad / s>\n >>> plasma_frequency(1e19*u.m**-3)\n <Quantity 1.78398...e+11 rad / s>\n >>> plasma_frequency(1e19*u.m**-3, to_hz=True)\n <Quantity 2.83930...e+10 Hz>\n\n \"\"\"\n\n try:\n m = atomic.particle_mass(particle)\n if z_mean is None:\n # warnings.warn(\"No z_mean given, defaulting to atomic charge\",\n # PhysicsWarning)\n try:\n Z = atomic.integer_charge(particle)\n except Exception:\n Z = 1\n else:\n # using user provided average ionization\n Z = z_mean\n Z = np.abs(Z)\n # TODO REPLACE WITH Z = np.abs(_grab_charge(particle, z_mean)), some bugs atm\n except Exception:\n raise ValueError(f\"Invalid particle, {particle}, in \"\n \"plasma_frequency.\")\n\n omega_p = u.rad * Z * e * np.sqrt(n / (eps0 * m))\n\n return omega_p.si\n\n\n@validate_quantities(T_e={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n n_e={'can_be_negative': False})\ndef Debye_length(T_e: u.K, n_e: u.m ** -3) -> u.m:\n r\"\"\"Calculate the characteristic decay length for electric fields,\n due to charge screening.\n\n Parameters\n ----------\n T_e: ~astropy.units.Quantity\n Electron temperature\n\n n_e: ~astropy.units.Quantity\n Electron number density\n\n Returns\n -------\n lambda_D : ~astropy.units.Quantity\n The Debye length in meters\n\n Raises\n ------\n TypeError\n If either argument is not a `~astropy.units.Quantity`\n\n ~astropy.units.UnitConversionError\n If either argument is in incorrect units\n\n ValueError\n If either argument contains invalid values\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The Debye length is the exponential scale length for charge\n screening and is given by\n\n .. 
math::\n \\lambda_D = \\sqrt{\\frac{\\epsilon_0 k_b T_e}{n_e e^2}}\n\n for an electron plasma with nearly stationary ions.\n\n The electrical potential will drop by a factor of 1/e every Debye\n length.\n\n Plasmas will generally be quasineutral on length scales significantly\n larger than the Debye length.\n\n See Also\n --------\n Debye_number\n\n Example\n -------\n >>> from astropy import units as u\n >>> Debye_length(5e6*u.K, 5e15*u.m**-3)\n <Quantity 0.002182... m>\n\n \"\"\"\n lambda_D = np.sqrt(eps0 * k_B * T_e / (n_e * e ** 2))\n return lambda_D\n\n\n@validate_quantities(T_e={'can_be_negative': False,\n 'equivalencies': u.temperature_energy()},\n n_e={'can_be_negative': False})\ndef Debye_number(T_e: u.K, n_e: u.m ** -3) -> u.dimensionless_unscaled:\n r\"\"\"Return the number of electrons within a sphere with a radius\n of the Debye length.\n\n Parameters\n ----------\n T_e : ~astropy.units.Quantity\n Electron temperature\n\n n_e : ~astropy.units.Quantity\n Electron number density\n\n Raises\n ------\n TypeError\n If either argument is not a `~astropy.units.Quantity`\n\n astropy.units.UnitConversionError\n If either argument is in incorrect units\n\n ValueError\n If either argument contains invalid values\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Returns\n -------\n N_D : ~astropy.units.Quantity\n Number of electrons within a sphere with a radius of the Debye length\n\n Notes\n -----\n The Debye number is the number of electrons contained within a sphere with\n a radius of a Debye length and is given by\n\n .. math::\n N_D = \\frac{4\\pi}{3}n_e\\lambda_D^3\n\n The Debye number is also known as the plasma parameter.\n\n Collective behavior requires a Debye number significantly larger than one.\n\n See Also\n --------\n Debye_length\n\n Example\n -------\n >>> from astropy import units as u\n >>> Debye_number(5e6*u.K, 5e9*u.cm**-3)\n <Quantity 2.17658...e+08>\n\n \"\"\"\n\n lambda_D = Debye_length(T_e, n_e)\n N_D = (4 / 3) * np.pi * n_e * lambda_D ** 3\n\n return N_D\n\n\n@validate_quantities(n={'can_be_negative': False},\n validations_on_return={'equivalencies': u.dimensionless_angles()})\[email protected]_input(require='charged')\ndef inertial_length(n: u.m ** -3, particle: atomic.Particle) -> u.m:\n r\"\"\"\n Calculate a charged particle's inertial length.\n\n Parameters\n ----------\n n : ~astropy.units.Quantity\n Particle number density in units convertible to m ** -3.\n\n particle : str, optional\n Representation of the particle species (e.g., 'p+' for protons,\n 'D+' for deuterium, or 'He-4 +1' for singly ionized helium-4).\n\n Returns\n -------\n d : ~astropy.units.Quantity\n The particle's inertial length in meters.\n\n Raises\n ------\n TypeError\n If n not a `~astropy.units.Quantity` or particle is not a string.\n\n ~astropy.units.UnitConversionError\n If n is not in units of a number density.\n\n ValueError\n The particle density does not have an appropriate value.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided and SI units are assumed.\n\n Notes\n -----\n The inertial length of a particle of species :math:`s` is given by\n\n .. math::\n d = \\frac{c}{\\omega_{ps}}\n\n The inertial length is the characteristic length scale for a\n particle to be accelerated in a plasma. 
The Hall effect becomes\n important on length scales shorter than the ion inertial length.\n\n The inertial length is also known as the skin depth.\n\n Example\n -------\n >>> from astropy import units as u\n >>> inertial_length(5 * u.m ** -3, 'He+')\n <Quantity 2.02985...e+08 m>\n >>> inertial_length(5 * u.m ** -3, 'e-')\n <Quantity 2376534.75... m>\n\n \"\"\"\n omega_p = plasma_frequency(n, particle=particle)\n\n return c / omega_p\n\n\n@validate_quantities\ndef magnetic_pressure(B: u.T) -> u.Pa:\n r\"\"\"\n Calculate the magnetic pressure.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field in units convertible to tesla.\n\n Returns\n -------\n p_B : ~astropy.units.Quantity\n The magnetic pressure in units in pascals (newtons per square meter).\n\n Raises\n ------\n TypeError\n If the input is not a `~astropy.units.Quantity`.\n\n UnitConversionError\n If the input is not in units convertible to tesla.\n\n ValueError\n If the magnetic field strength is not a real number between\n +/- infinity.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The magnetic pressure is given by:\n\n .. math::\n p_B = \\frac{B^2}{2 \\mu_0}\n\n The motivation behind having two separate functions for magnetic\n pressure and magnetic energy density is that it allows greater\n insight into the physics that are being considered by the user and\n thus more readable code.\n\n See Also\n --------\n magnetic_energy_density : returns an equivalent `~astropy.units.Quantity`,\n except in units of joules per cubic meter.\n\n Example\n -------\n >>> from astropy import units as u\n >>> magnetic_pressure(0.1*u.T).to(u.Pa)\n <Quantity 3978.87... Pa>\n\n \"\"\"\n return (B ** 2) / (2 * mu0)\n\n\n@validate_quantities\ndef magnetic_energy_density(B: u.T) -> u.J / u.m ** 3:\n r\"\"\"\n Calculate the magnetic energy density.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field in units convertible to tesla.\n\n Returns\n -------\n E_B : ~astropy.units.Quantity\n The magnetic energy density in units of joules per cubic meter.\n\n Raises\n ------\n TypeError\n If the input is not a Quantity.\n\n ~astropy.units.UnitConversionError\n If the input is not in units convertible to tesla.\n\n ValueError\n If the magnetic field strength does not have an appropriate.\n value.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The magnetic energy density is given by:\n\n .. math::\n E_B = \\frac{B^2}{2 \\mu_0}\n\n The motivation behind having two separate functions for magnetic\n pressure and magnetic energy density is that it allows greater\n insight into the physics that are being considered by the user and\n thus more readable code.\n\n See Also\n --------\n magnetic_pressure : Returns an equivalent Quantity, except in units\n of pascals.\n\n Example\n -------\n >>> from astropy import units as u\n >>> magnetic_energy_density(0.1*u.T)\n <Quantity 3978.87... 
J / m3>\n\n \"\"\"\n return magnetic_pressure(B)\n\n\n@validate_quantities(n_e={'can_be_negative': False},\n validations_on_return={'units': [u.rad / u.s, u.Hz],\n 'equivalencies': [(u.cy / u.s, u.Hz)]})\n@angular_freq_to_hz\ndef upper_hybrid_frequency(B: u.T, n_e: u.m ** -3) -> u.rad / u.s:\n r\"\"\"\n Return the upper hybrid frequency.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field magnitude in units convertible to tesla.\n\n n_e : ~astropy.units.Quantity\n The electron number density.\n\n Returns\n -------\n omega_uh : ~astropy.units.Quantity\n The upper hybrid frequency in radians per second.\n\n Raises\n ------\n TypeError\n If either of `B` or `n_e` is not a Quantity.\n\n ~astropy.units.UnitConversionError\n If either of `B` or `n_e` is in incorrect units.\n\n ValueError\n If either of `B` or `n_e` contains invalid values or are of\n incompatible dimensions.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The upper hybrid frequency is given through the relation\n\n .. math::\n \\omega_{uh}^2 = \\omega_{ce}^2 + \\omega_{pe}^2\n\n where :math:`\\omega_{ce}` is the electron gyrofrequency and\n :math:`\\omega_{pe}` is the electron plasma frequency.\n\n Example\n -------\n >>> from astropy import units as u\n >>> upper_hybrid_frequency(0.2*u.T, n_e=5e19*u.m**-3)\n <Quantity 4.00459...e+11 rad / s>\n >>> upper_hybrid_frequency(0.2*u.T, n_e=5e19*u.m**-3, to_hz = True)\n <Quantity 6.37350...e+10 Hz>\n\n \"\"\"\n omega_pe = plasma_frequency(n=n_e)\n omega_ce = gyrofrequency(B)\n omega_uh = (np.sqrt(omega_pe ** 2 + omega_ce ** 2))\n\n return omega_uh\n\n\n@validate_quantities(n_i={'can_be_negative': False},\n validations_on_return={'units': [u.rad / u.s, u.Hz],\n 'equivalencies': [(u.cy / u.s, u.Hz)]})\n@angular_freq_to_hz\ndef lower_hybrid_frequency(B: u.T, n_i: u.m ** -3, ion='p+') -> u.rad / u.s:\n r\"\"\"\n Return the lower hybrid frequency.\n\n Parameters\n ----------\n B : ~astropy.units.Quantity\n The magnetic field magnitude in units convertible to tesla.\n\n n_i : ~astropy.units.Quantity\n Ion number density.\n\n ion : str, optional\n Representation of the ion species (e.g., 'p' for protons, 'D+'\n for deuterium, or 'He-4 +1' for singly ionized helium-4),\n which defaults to protons. If no charge state information is\n provided, then the ions are assumed to be singly charged.\n\n Returns\n -------\n omega_lh : ~astropy.units.Quantity\n The lower hybrid frequency in radians per second.\n\n Raises\n ------\n TypeError\n If either of `B` or `n_i` is not a `~astropy.units.Quantity`,\n or ion is of an inappropriate type.\n\n ~astropy.units.UnitConversionError\n If either of `B` or `n_i` is in incorrect units.\n\n ValueError\n If either of `B` or `n_i` contains invalid values or are of\n incompatible dimensions, or ion cannot be used to identify an\n ion or isotope.\n\n Warns\n -----\n ~astropy.units.UnitsWarning\n If units are not provided, SI units are assumed\n\n Notes\n -----\n The lower hybrid frequency is given through the relation\n\n .. 
math::\n \\frac{1}{\\omega_{lh}^2} = \\frac{1}{\\omega_{ci}^2 + \\omega_{pi}^2} +\n \\frac{1}{\\omega_{ci}\\omega_{ce}}\n\n where :math:`\\omega_{ci}` is the ion gyrofrequency,\n :math:`\\omega_{ce}` is the electron gyrofrequency, and\n :math:`\\omega_{pi}` is the ion plasma frequency.\n\n Example\n -------\n >>> from astropy import units as u\n >>> lower_hybrid_frequency(0.2*u.T, n_i=5e19*u.m**-3, ion='D+')\n <Quantity 5.78372...e+08 rad / s>\n >>> lower_hybrid_frequency(0.2*u.T, n_i=5e19*u.m**-3, ion='D+', to_hz = True)\n <Quantity 92050879.3... Hz>\n\n \"\"\"\n\n # We do not need a charge state here, so the sole intent is to\n # catch invalid ions.\n try:\n atomic.integer_charge(ion)\n except Exception:\n raise ValueError(\"Invalid ion in lower_hybrid_frequency.\")\n\n omega_ci = gyrofrequency(B, particle=ion)\n omega_pi = plasma_frequency(n_i, particle=ion)\n omega_ce = gyrofrequency(B)\n omega_lh = ((omega_ci * omega_ce) ** -1 + omega_pi ** -2) ** -0.5\n # TODO possibly optimize the above line via np.sqrt\n omega_lh = omega_lh\n\n return omega_lh\n" ]
[ [ "numpy.logical_xor", "numpy.abs", "numpy.sqrt", "numpy.isfinite", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fkubota/enjoy-vim
[ "f1f87ff569ebeea3b84993add200a2c7125cab7a" ]
[ "enjoy/001_20210224/live_coding/func.py" ]
[ "import pandas as pd\nfrom sklearn.datasets import load_iris\n\n\ndef get_datasets():\n iris = load_iris()\n data = iris.data\n feature_names = iris.feature_names\n target = iris.target\n df = pd.DataFrame(data, columns=feature_names)\n\n return df, target\n" ]
[ [ "sklearn.datasets.load_iris", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jmirabel/agimus-demos
[ "d18703626f3bac322dec788ad9506495771d15a3" ]
[ "talos/camera_calibration/script_hpp.py" ]
[ "# /usr/bin/env python\nimport random\nfrom math import pi\n\nimport numpy as np\nfrom hpp import Quaternion, Transform\nfrom hpp.corbaserver.manipulation import (\n ConstraintGraph,\n ProblemSolver,\n Rule,\n newProblem,\n)\nfrom hpp.corbaserver.manipulation.robot import Robot\nfrom hpp.gepetto.manipulation import ViewerFactory\n\n\nnewProblem()\n\nRobot.packageName = \"talos_data\"\nRobot.urdfName = \"talos\"\nRobot.urdfSuffix = \"_full_v2\"\nRobot.srdfSuffix = \"\"\n\n\nclass Mire(object):\n rootJointType = \"freeflyer\"\n packageName = \"agimus_demos\"\n urdfName = \"calibration_mire\"\n urdfSuffix = \"\"\n srdfSuffix = \"\"\n name = \"mire\"\n handles = [\"mire/left\", \"mire/right\"]\n\n\nrobot = Robot(\"dev\", \"talos\", rootJointType=\"freeflyer\")\nrobot.leftAnkle = \"talos/leg_left_6_joint\"\nrobot.rightAnkle = \"talos/leg_right_6_joint\"\n\nrobot.setJointBounds(\"talos/root_joint\", [-1, 1, -1, 1, 0.5, 1.5])\n\nps = ProblemSolver(robot)\nps.setRandomSeed(123)\nps.selectPathProjector(\"Progressive\", 0.2)\nps.setErrorThreshold(1e-3)\nps.setMaxIterProjection(40)\n\nps.addPathOptimizer(\"SimpleTimeParameterization\")\n\nvf = ViewerFactory(ps)\nvf.loadObjectModel(Mire, \"mire\")\nrobot.setJointBounds(\"mire/root_joint\", [-1, 1, -1, 1, 0, 2])\n\nhalf_sitting = [\n 0,\n 0,\n 1.0192720229567027,\n 0,\n 0,\n 0,\n 1, # root_joint\n 0.0,\n 0.0,\n -0.411354,\n 0.859395,\n -0.448041,\n -0.001708, # leg_left\n 0.0,\n 0.0,\n -0.411354,\n 0.859395,\n -0.448041,\n -0.001708, # leg_right\n 0,\n 0.006761, # torso\n 0.25847,\n 0.173046,\n -0.0002,\n -0.525366,\n 0,\n 0,\n 0.1, # arm_left\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0, # gripper_left\n -0.25847,\n -0.173046,\n 0.0002,\n -0.525366,\n 0,\n 0,\n 0.1, # arm_right\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0, # gripper_right\n 0,\n 0, # head\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1, # mire\n]\nq_init = robot.getCurrentConfig()\n\n# INIT CHESSBOARD AND CAMERA PARAM AND FUNCTIONS\nchessboard_pts = [\n [-0.05, -0.1, 0.0],\n [0.25, -0.1, 0.0],\n [0.25, 0.1, 0.0],\n [-0.05, 0.1, 0.0],\n]\nchessboard_normal = np.matrix([0.0, 0.0, -1.0]).transpose()\n\nimage_width = 1280\nimage_height = 720\nprojection_matrix = np.matrix(\n [[999.195, 0.0, 646.3244], [0.0, 1008.400, 359.955], [0.0, 0.0, 1.0]]\n)\n\ndist_from_camera = [0.3, 0.45]\ncamera_position = np.matrix([0.0, 0.0, 0.0])\n\n\ndef isInImage(coord):\n x = coord[0, 0]\n y = coord[1, 0]\n return (x >= 0) & (x < image_width) & (y >= 0) & (y < image_height)\n\n\ndef projectPoint(Rt, pt):\n coord = projection_matrix * Rt * np.vstack((pt, np.matrix([1])))\n coord /= coord[2, 0]\n return coord[0:2, 0]\n\n\n# END INIT CHESSBOARD AND CAMERA PARAM AND FUNCTIONS\n\n\nps.addPartialCom(\"talos\", [\"talos/root_joint\"])\nps.addPartialCom(\"talos_mire\", [\"talos/root_joint\", \"mire/root_joint\"])\n\nps.createStaticStabilityConstraints(\n \"balance\", half_sitting, \"talos\", ProblemSolver.FIXED_ON_THE_GROUND\n)\nfoot_placement = [\"balance/pose-left-foot\", \"balance/pose-right-foot\"]\nfoot_placement_complement = []\n\nrobot.setCurrentConfig(half_sitting)\ncom_wf = np.array(ps.getPartialCom(\"talos\"))\ntf_la = Transform(robot.getJointPosition(robot.leftAnkle))\ncom_la = tf_la.inverse().transform(com_wf)\n\nps.createRelativeComConstraint(\n \"com_talos_mire\", \"talos_mire\", robot.leftAnkle, com_la.tolist(), (True, True, True)\n)\nps.createRelativeComConstraint(\n \"com_talos\", \"talos\", robot.leftAnkle, com_la.tolist(), (True, True, True)\n)\n\nleft_gripper_lock = []\nright_gripper_lock = []\nhead_lock = []\nother_lock = 
[\"talos/torso_1_joint\"]\nfor n in robot.jointNames:\n s = robot.getJointConfigSize(n)\n r = robot.rankInConfiguration[n]\n if n.startswith(\"talos/gripper_right\"):\n ps.createLockedJoint(n, n, half_sitting[r : r + s])\n right_gripper_lock.append(n)\n elif n.startswith(\"talos/gripper_left\"):\n ps.createLockedJoint(n, n, half_sitting[r : r + s])\n left_gripper_lock.append(n)\n elif n.startswith(\"talos/head\"):\n ps.createLockedJoint(n, n, half_sitting[r : r + s])\n head_lock.append(n)\n elif n in other_lock:\n ps.createLockedJoint(n, n, half_sitting[r : r + s])\n\ngraph = ConstraintGraph.buildGenericGraph(\n robot,\n \"graph\",\n [\"talos/left_gripper\", \"talos/right_gripper\"],\n [\"mire\"],\n [Mire.handles],\n [[]], # contacts per object\n [], # env contacts\n [\n Rule([\"talos/left_gripper\"], [Mire.handles[0]], True),\n Rule([\"talos/right_gripper\"], [Mire.handles[1]], True),\n ],\n)\n\ngraph.setConstraints(\n graph=True,\n lockDof=left_gripper_lock + right_gripper_lock + other_lock,\n numConstraints=[\"com_talos_mire\"] + foot_placement,\n)\n\ngraph.initialize()\n\n\ndef setGuassianShooter(mean, stddev=[0.01] * robot.getNumberDof()):\n robot.setCurrentConfig(mean)\n # Use the current robot velocity as std dev for the shooter\n # Higher values on the arm with the chessboard might limit the use of the other DoFs\n ps.setParameter(\"ConfigurationShooter/Gaussian/useRobotVelocity\", True)\n robot.client.basic.robot.setCurrentVelocity(stddev)\n ps.client.basic.problem.selectConfigurationShooter(\"Gaussian\")\n\n\nqrand = half_sitting[:]\nq_proj_list = []\n\nexpected_poses = []\n\n# Randomize the position of the chessboard\n# The chessboard is on the right of the plate, so we shift the gaze (pointing to the centre of the plate) to the left\nys = [300, 350, 400, 450]\nfor x in [400, 450, 500, 550, 600, 650, 700, 750, 800]:\n for y in ys:\n # Keep only poses where the chessboard can be seen from the camera\n setGuassianShooter(qrand)\n shoot_pose = 0\n shoot_cfg = 0\n while True:\n chessboard_Z = random.uniform(dist_from_camera[0], dist_from_camera[1])\n chessboard_X = (\n (x - projection_matrix[0, 2]) / projection_matrix[0, 0] * chessboard_Z\n )\n chessboard_Y = (\n (y - projection_matrix[1, 2]) / projection_matrix[1, 1] * chessboard_Z\n )\n chessboard_position = np.matrix([chessboard_X, chessboard_Y, chessboard_Z])\n\n q = Quaternion().fromRPY(\n random.uniform(-pi / 12.0, pi / 12.0),\n random.uniform(-pi / 12.0, pi / 12.0),\n random.uniform(-pi / 12.0, pi / 12.0),\n )\n shoot_pose += 1\n R = q.toRotationMatrix()\n if (R * chessboard_normal)[2] >= 0.0:\n continue\n\n Rt = np.hstack((R, (chessboard_position - camera_position).transpose()))\n\n if not all(\n [\n isInImage(projectPoint(Rt, np.matrix(pt).transpose()))\n for pt in chessboard_pts\n ]\n ):\n continue\n\n q = Quaternion().fromRPY(-pi, 0, -pi) * q # Switch tn the real camera frame\n\n chessboard_pose = (\n chessboard_position[0, 0],\n chessboard_position[0, 1],\n chessboard_position[0, 2],\n ) + q.toTuple()\n ps.createTransformationConstraint(\n \"gaze\",\n \"talos/rgbd_rgb_optical_joint\",\n \"mire/root_joint\",\n chessboard_pose,\n [True] * 6,\n )\n\n ps.resetConstraints()\n ps.setNumericalConstraints(\n \"proj\",\n foot_placement\n + [\"com_talos_mire\", \"talos/left_gripper grasps mire/left\", \"gaze\"],\n )\n ps.setLockedJointConstraints(\n \"proj\", left_gripper_lock + right_gripper_lock + other_lock\n )\n\n res, qproj, err = ps.applyConstraints(qrand)\n if res:\n valid, msg = robot.isConfigValid(qproj)\n if valid:\n 
print(\"Found pose\", shoot_pose, \"\\t\", shoot_cfg)\n expected_poses.append(chessboard_pose)\n q_proj_list.append(qproj)\n qrand = qproj[:]\n break\n # It may be needed to shoot from time to time but so far it works without doing it.\n # qrand = robot.shootRandomConfig()\n # shoot_cfg += 1\n ys.reverse()\n\nhpp_poses = []\nfor q in q_proj_list:\n robot.setCurrentConfig(q)\n oMc = Transform(robot.getJointPosition(\"talos/rgbd_rgb_optical_joint\"))\n oMm = Transform(robot.getJointPosition(\"mire/root_joint\"))\n cMm = oMc.inverse() * oMm\n hpp_poses.append(cMm.toTuple())\n\nres, hs_proj, err = graph.applyNodeConstraints(\n \"talos/left_gripper grasps mire/left\", half_sitting\n)\n\npaths = list()\nfailed = False\nfor i, (q1, q2) in enumerate(zip([hs_proj] + q_proj_list, q_proj_list + [hs_proj])):\n res, pid, msg = ps.directPath(q1, q2, True)\n if res:\n print(\"Path from\", i, \"to\", i + 1, \":\", pid)\n paths.append(pid)\n else:\n print(\"Could not joint\", i, \"to\", i + 1, \":\", msg)\n failed = True\n\nps.setParameter(\"SimpleTimeParameterization/safety\", 0.5)\nps.setParameter(\"SimpleTimeParameterization/order\", 2)\nps.setParameter(\"SimpleTimeParameterization/maxAcceleration\", 1.0)\n\ncleanPaths = True\njoinPaths = False\n\n\ndef displayPaths(viewer, paths):\n from hpp.gepetto import PathPlayer\n\n pp = PathPlayer(viewer, client=ps.client.basic)\n for p in paths:\n pp(p)\n\n\nif not failed:\n if joinPaths:\n # join path\n i0 = paths[0]\n for i in paths[1:]:\n ps.concatenatePath(i0, i)\n\n if cleanPaths:\n for k, i in enumerate(paths[1:]):\n ps.erasePath(i - k)\n\n ps.optimizePath(i0)\n print(\n \"Optimized path:\",\n ps.numberPaths() - 1,\n \",\",\n ps.pathLength(ps.numberPaths() - 1),\n )\n\n else:\n optpaths = []\n for i in paths:\n ps.optimizePath(i)\n optpaths.append(ps.numberPaths() - 1)\n\n print(\"Solution paths are\\noptpaths=\", str(optpaths))\n print(\"displayPaths(v,optpaths) # to visualize paths\")\n" ]
[ [ "numpy.matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Khaled-Abdelhamid/project-ml-microservice-kubernetes
[ "4b4b3e50447ee67235e829c7a5c00b63ec0a7483" ]
[ "app.py" ]
[ "from flask import Flask, request, jsonify\nfrom flask.logging import create_logger\nimport logging\n\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import StandardScaler\n\napp = Flask(__name__)\nLOG = create_logger(app)\nLOG.setLevel(logging.INFO)\n\n\ndef scale(payload):\n \"\"\"Scales Payload\"\"\"\n\n LOG.info(f\"Scaling Payload: \\n{payload}\")\n scaler = StandardScaler().fit(payload.astype(float))\n scaled_adhoc_predict = scaler.transform(payload.astype(float))\n return scaled_adhoc_predict\n\n\[email protected](\"/\")\ndef home():\n html = \"<h3>Sklearn Prediction Home</h3>\"\n return html.format(format)\n\n\[email protected](\"/predict\", methods=[\"POST\"])\ndef predict():\n \"\"\"Performs an sklearn prediction\n \n input looks like:\n {\n \"CHAS\":{\n \"0\":0\n },\n \"RM\":{\n \"0\":6.575\n },\n \"TAX\":{\n \"0\":296.0\n },\n \"PTRATIO\":{\n \"0\":15.3\n },\n \"B\":{\n \"0\":396.9\n },\n \"LSTAT\":{\n \"0\":4.98\n }\n \n result looks like:\n { \"prediction\": [ <val> ] }\n \n \"\"\"\n\n # Logging the input payload\n json_payload = request.json\n LOG.info(f\"JSON payload: \\n{json_payload}\")\n inference_payload = pd.DataFrame(json_payload)\n LOG.info(f\"Inference payload DataFrame: \\n{inference_payload}\")\n # scale the input\n scaled_payload = scale(inference_payload)\n # get an output prediction from the pretrained model, clf\n prediction = list(clf.predict(scaled_payload))\n # TO DO: Log the output prediction value\n LOG.info(f\"Prediction output: {prediction}\")\n return jsonify({\"prediction\": prediction})\n\n\nif __name__ == \"__main__\":\n # load pretrained model as clf\n clf = joblib.load(\"./model_data/boston_housing_prediction.joblib\")\n app.run(host=\"0.0.0.0\", port=80, debug=True) # specify port=80\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "sklearn.externals.joblib.load", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
anderson2981/mirgecom
[ "8d5d44145b5984f27a3dcda30956756a9cbcd284" ]
[ "mirgecom/euler.py" ]
[ "r\"\"\":mod:`mirgecom.euler` helps solve Euler's equations of gas dynamics.\n\nEuler's equations of gas dynamics:\n\n.. math::\n\n \\partial_t \\mathbf{Q} = -\\nabla\\cdot{\\mathbf{F}} +\n (\\mathbf{F}\\cdot\\hat{n})_{\\partial\\Omega} + \\mathbf{S}\n\nwhere:\n\n- state $\\mathbf{Q} = [\\rho, \\rho{E}, \\rho\\vec{V} ]$\n- flux $\\mathbf{F} = [\\rho\\vec{V},(\\rho{E} + p)\\vec{V},\n (\\rho(\\vec{V}\\otimes\\vec{V}) + p*\\mathbf{I})]$,\n- domain boundary $\\partial\\Omega$,\n- sources $\\mathbf{S} = [{(\\partial_t{\\rho})}_s,\n {(\\partial_t{\\rho{E}})}_s, {(\\partial_t{\\rho\\vec{V}})}_s]$\n\n\nState Vector Handling\n^^^^^^^^^^^^^^^^^^^^^\n\n.. autoclass:: ConservedVars\n.. autofunction:: split_conserved\n.. autofunction:: join_conserved\n\nRHS Evaluation\n^^^^^^^^^^^^^^\n\n.. autofunction:: inviscid_flux\n.. autofunction:: inviscid_operator\n\nTime Step Computation\n^^^^^^^^^^^^^^^^^^^^^\n\n.. autofunction:: get_inviscid_timestep\n.. autofunction:: get_inviscid_cfl\n\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2020 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom dataclasses import dataclass\n\nimport numpy as np\nfrom meshmode.dof_array import thaw\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\nfrom grudge.eager import (\n interior_trace_pair,\n cross_rank_trace_pairs\n)\n\n\n@dataclass(frozen=True)\nclass ConservedVars: # FIXME: Name?\n r\"\"\"Resolve the canonical conserved quantities.\n\n Get the canonical conserved quantities (mass, energy, momentum)\n per unit volume = $(\\rho,\\rho{E},\\rho\\vec{V})$ from an agglomerated\n object array.\n\n .. attribute:: dim\n\n .. attribute:: mass\n\n Mass per unit volume\n\n .. attribute:: energy\n\n Energy per unit volume\n\n .. attribute:: momentum\n\n Momentum vector per unit volume\n\n .. automethod:: join\n .. 
automethod:: replace\n \"\"\"\n\n mass: np.ndarray\n energy: np.ndarray\n momentum: np.ndarray\n\n @property\n def dim(self):\n \"\"\"Return the number of physical dimensions.\"\"\"\n return len(self.momentum)\n\n def join(self):\n \"\"\"Call :func:`join_conserved` on *self*.\"\"\"\n return join_conserved(\n dim=self.dim,\n mass=self.mass,\n energy=self.energy,\n momentum=self.momentum)\n\n def replace(self, **kwargs):\n \"\"\"Return a copy of *self* with the attributes in *kwargs* replaced.\"\"\"\n from dataclasses import replace\n return replace(self, **kwargs)\n\n\ndef _aux_shape(ary, leading_shape):\n \"\"\":arg leading_shape: a tuple with which ``ary.shape`` is expected to begin.\"\"\"\n from meshmode.dof_array import DOFArray\n if (isinstance(ary, np.ndarray) and ary.dtype == np.object\n and not isinstance(ary, DOFArray)):\n naxes = len(leading_shape)\n if ary.shape[:naxes] != leading_shape:\n raise ValueError(\"array shape does not start with expected leading \"\n \"dimensions\")\n return ary.shape[naxes:]\n else:\n if leading_shape != ():\n raise ValueError(\"array shape does not start with expected leading \"\n \"dimensions\")\n return ()\n\n\ndef split_conserved(dim, q):\n \"\"\"Get the canonical conserved quantities.\n\n Return a :class:`ConservedVars` that is the canonical conserved quantities,\n mass, energy, and momentum from the agglomerated object array extracted\n from the state vector *q*.\n \"\"\"\n assert len(q) == 2+dim\n return ConservedVars(mass=q[0], energy=q[1], momentum=q[2:2+dim])\n\n\ndef join_conserved(dim, mass, energy, momentum):\n \"\"\"Create an agglomerated solution array from the conserved quantities.\"\"\"\n from pytools import single_valued\n aux_shape = single_valued([\n _aux_shape(mass, ()),\n _aux_shape(energy, ()),\n _aux_shape(momentum, (dim,))])\n\n result = np.zeros((2+dim,) + aux_shape, dtype=object)\n result[0] = mass\n result[1] = energy\n result[2:] = momentum\n return result\n\n\ndef inviscid_flux(discr, eos, q):\n r\"\"\"Compute the inviscid flux vectors from flow solution *q*.\n\n The inviscid fluxes are\n $(\\rho\\vec{V},(\\rho{E}+p)\\vec{V},\\rho(\\vec{V}\\otimes\\vec{V})+p\\mathbf{I})$\n \"\"\"\n dim = discr.dim\n cv = split_conserved(dim, q)\n p = eos.pressure(cv)\n\n mom = cv.momentum\n return join_conserved(dim,\n mass=mom,\n energy=mom * (cv.energy + p) / cv.mass,\n momentum=np.outer(mom, mom)/cv.mass + np.eye(dim)*p)\n\n\ndef _get_wavespeed(dim, eos, cv: ConservedVars):\n \"\"\"Return the maximum wavespeed in for flow solution *q*.\"\"\"\n actx = cv.mass.array_context\n\n v = cv.momentum / cv.mass\n return actx.np.sqrt(np.dot(v, v)) + eos.sound_speed(cv)\n\n\ndef _facial_flux(discr, eos, q_tpair, local=False):\n \"\"\"Return the flux across a face given the solution on both sides *q_tpair*.\n\n Parameters\n ----------\n eos: mirgecom.eos.GasEOS\n Implementing the pressure and temperature functions for\n returning pressure and temperature as a function of the state q.\n\n q_tpair: :class:`grudge.symbolic.TracePair`\n Trace pair for the face upon which flux calculation is to be performed\n\n local: bool\n Indicates whether to skip projection of fluxes to \"all_faces\" or not. 
If\n set to *False* (the default), the returned fluxes are projected to\n \"all_faces.\" If set to *True*, the returned fluxes are not projected to\n \"all_faces\"; remaining instead on the boundary restriction.\n \"\"\"\n dim = discr.dim\n\n actx = q_tpair[0].int.array_context\n\n flux_int = inviscid_flux(discr, eos, q_tpair.int)\n flux_ext = inviscid_flux(discr, eos, q_tpair.ext)\n\n # Lax-Friedrichs/Rusanov after [Hesthaven_2008]_, Section 6.6\n flux_avg = 0.5*(flux_int + flux_ext)\n\n lam = actx.np.maximum(\n _get_wavespeed(dim, eos=eos, cv=split_conserved(dim, q_tpair.int)),\n _get_wavespeed(dim, eos=eos, cv=split_conserved(dim, q_tpair.ext))\n )\n\n normal = thaw(actx, discr.normal(q_tpair.dd))\n flux_weak = (\n flux_avg @ normal\n - 0.5 * lam * (q_tpair.ext - q_tpair.int))\n\n if local is False:\n return discr.project(q_tpair.dd, \"all_faces\", flux_weak)\n return flux_weak\n\n\ndef inviscid_operator(discr, eos, boundaries, q, t=0.0):\n r\"\"\"Compute RHS of the Euler flow equations.\n\n Returns\n -------\n numpy.ndarray\n The right-hand-side of the Euler flow equations:\n\n .. math::\n\n \\dot{\\mathbf{q}} = \\mathbf{S} - \\nabla\\cdot\\mathbf{F} +\n (\\mathbf{F}\\cdot\\hat{n})_{\\partial\\Omega}\n\n Parameters\n ----------\n q\n State array which expects at least the canonical conserved quantities\n (mass, energy, momentum) for the fluid at each point.\n\n boundaries\n Dictionary of boundary functions, one for each valid btag\n\n t\n Time\n\n eos: mirgecom.eos.GasEOS\n Implementing the pressure and temperature functions for\n returning pressure and temperature as a function of the state q.\n \"\"\"\n vol_flux = inviscid_flux(discr, eos, q)\n dflux = discr.weak_div(vol_flux)\n\n interior_face_flux = _facial_flux(\n discr, eos=eos, q_tpair=interior_trace_pair(discr, q))\n\n # Domain boundaries\n domain_boundary_flux = sum(\n _facial_flux(\n discr,\n q_tpair=boundaries[btag].boundary_pair(discr,\n eos=eos,\n btag=btag,\n t=t,\n q=q),\n eos=eos\n )\n for btag in boundaries\n )\n\n # Flux across partition boundaries\n partition_boundary_flux = sum(\n _facial_flux(discr, eos=eos, q_tpair=part_pair)\n for part_pair in cross_rank_trace_pairs(discr, q)\n )\n\n return discr.inverse_mass(\n dflux - discr.face_mass(interior_face_flux + domain_boundary_flux\n + partition_boundary_flux)\n )\n\n\ndef get_inviscid_cfl(discr, eos, dt, q):\n \"\"\"Calculate and return CFL based on current state and timestep.\"\"\"\n wanted_dt = get_inviscid_timestep(discr, eos=eos, cfl=1.0, q=q)\n return dt / wanted_dt\n\n\ndef get_inviscid_timestep(discr, eos, cfl, q):\n \"\"\"Routine (will) return the (local) maximum stable inviscid timestep.\n\n Currently, it's a hack waiting for the geometric_factor helpers port\n from grudge.\n \"\"\"\n dim = discr.dim\n mesh = discr.mesh\n order = max([grp.order for grp in discr.discr_from_dd(\"vol\").groups])\n nelements = mesh.nelements\n nel_1d = nelements ** (1.0 / (1.0 * dim))\n\n # This roughly reproduces the timestep AK used in wave toy\n dt = (1.0 - 0.25 * (dim - 1)) / (nel_1d * order ** 2)\n return cfl * dt\n\n# dt_ngf = dt_non_geometric_factor(discr.mesh)\n# dt_gf = dt_geometric_factor(discr.mesh)\n# wavespeeds = _get_wavespeed(w,eos=eos)\n# max_v = clmath.max(wavespeeds)\n# return c*dt_ngf*dt_gf/max_v\n" ]
[ [ "numpy.dot", "numpy.eye", "numpy.zeros", "numpy.outer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
avnishsachar/GPRPy
[ "47d0f2928dbd7ea446e8cd40ab07d7fed879588b" ]
[ "scripts/gprpy.py" ]
[ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom gprIO_MALA import readMALA\n\nclass gprpyProfile:\n '''\n Ground penetrating radar data processing and visualization class \n for common-offset profiles.\n '''\n def __init__(self,filename=None):\n '''\n Initialization for a gprpyProfile object. Initialization can be \n empty or with a provided filename for the GPR data.\n\n INPUT:\n filename data file name. Currently supported formats:.rd3 (MALA)\n '''\n self.history = [\"mygpr = gp.gprpyProfile()\"]\n # Initialize previous for undo\n self.previous = {}\n if filename is not None:\n self.importdata(filename)\n self.showProfile()\n plt.show() \n \n def importdata(self,filename):\n '''\n Loads .rad (MALA) data files and populates all the gprpyProfile fields.\n '''\n file_name, file_ext = os.path.splitext(filename) \n\n if file_ext==\".rad\" or file_ext==\".rd3\":\n self.data, self.info = readMALA(file_name)\n self.twtt = np.linspace(0,float(self.info[\"TIMEWINDOW\"]),int(self.info[\"SAMPLES\"]))\n self.profilePos = float(self.info[\"DISTANCE INTERVAL\"])*np.arange(0,self.data.shape[1])\n self.velocity = None\n self.depth = None\n self.maxTopo = None\n self.minTopo = None\n self.threeD = None\n self.data_pretopo = None\n self.twtt_pretopo = None\n \n else:\n print(\"Can only read rad or rd3 files\")\n \n # This is a helper function\n def prepProfileFig(self, color=\"gray\", contrast=1.0, yrng=None, xrng=None, asp=None):\n '''\n This is a helper function.\n It prepares the plot showing the processed profile data.\n \n INPUT:\n color \"gray\", or \"bwr\" for blue-white-red,\n or any other Matplotlib color map [default: \"gray\"]\n contrast Factor to increase contrast by reducing color range.\n [default = 1.0]\n yrng y-axis range to show [default: None, meaning \"everything\"]\n xrng x-axis range to show [default: None, meaning \"everything\"]\n asp aspect ratio [default: None, meaning automatic]\n\n OUTPUT:\n contrast contrast value used to prepare the figure\n color color value used to prepare the figure\n yrng yrng value used to prepare the figure\n xrng xrng value used to prepare the figure\n asp asp value used to prepare the figure\n '''\n dx=self.profilePos[3]-self.profilePos[2]\n dt=self.twtt[3]-self.twtt[2]\n stdcont = np.nanmax(np.abs(self.data)[:]) \n if self.velocity is None:\n plt.imshow(self.data,cmap=color,extent=[min(self.profilePos)-dx/2.0,\n max(self.profilePos)+dx/2.0,\n max(self.twtt)+dt/2.0,\n min(self.twtt)-dt/2.0],\n aspect=\"auto\",vmin=-stdcont/contrast, vmax=stdcont/contrast)\n plt.gca().set_ylabel(\"two-way travel time [ns]\")\n plt.gca().invert_yaxis()\n if yrng is not None:\n yrng=[np.max(yrng),np.min(yrng)]\n else:\n yrng=[np.max(self.twtt),np.min(self.twtt)]\n \n elif self.maxTopo is None:\n dy=dt*self.velocity\n plt.imshow(self.data,cmap=color,extent=[min(self.profilePos)-dx/2.0,\n max(self.profilePos)+dx/2.0,\n max(self.depth)+dy/2.0,\n min(self.depth)-dy/2.0],\n aspect=\"auto\",vmin=-stdcont/contrast, vmax=stdcont/contrast)\n plt.gca().set_ylabel(\"depth [m]\")\n plt.gca().invert_yaxis()\n if yrng is not None:\n yrng=[np.max(yrng),np.min(yrng)]\n else:\n yrng=[np.max(self.depth),np.min(self.depth)]\n \n else:\n dy=dt*self.velocity\n plt.imshow(self.data,cmap=color,extent=[min(self.profilePos)-dx/2.0,\n max(self.profilePos)+dx/2.0,\n self.minTopo-max(self.depth)-dy/2.0,\n self.maxTopo-min(self.depth)+dy/2.0],\n aspect=\"auto\",vmin=-stdcont/contrast, vmax=stdcont/contrast) \n plt.gca().set_ylabel(\"elevation [m]\")\n if yrng is None:\n 
yrng=[self.minTopo-np.max(self.depth),self.maxTopo-np.min(self.depth)]\n \n if xrng is None:\n xrng=[min(self.profilePos),max(self.profilePos)] \n print(xrng)\n if yrng is not None:\n plt.ylim(yrng)\n \n if xrng is not None:\n plt.xlim(xrng)\n\n if asp is not None:\n plt.gca().set_aspect(asp)\n\n plt.gca().get_xaxis().set_visible(True)\n plt.gca().get_yaxis().set_visible(True) \n plt.gca().set_xlabel(\"distance [m]\")\n plt.gca().xaxis.tick_top()\n plt.gca().xaxis.set_label_position('top')\n \n return contrast, color, yrng, xrng, asp\n \n def showProfile(self, **kwargs):\n '''\n Plots the profile using Matplotlib. \n You need to run .show() afterward to show it \n '''\n self.prepProfileFig(**kwargs)\n plt.show(block=False)\n\nif __name__ == '__main__':\n gprpyProfile(\"../data/VGT/Profile_0006.rd3\")\n " ]
[ [ "matplotlib.pyplot.gca", "numpy.abs", "numpy.min", "numpy.arange", "matplotlib.pyplot.ylim", "numpy.max", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shenberg/detectron2
[ "0b62f13106f1d30097563b059bfe9ae2803b2271" ]
[ "detectron2/modeling/meta_arch/panoptic_fpn.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom detectron2.structures import ImageList\n\nfrom ..backbone import build_backbone\nfrom ..postprocessing import detector_postprocess, sem_seg_postprocess\nfrom ..proposal_generator import build_proposal_generator\nfrom ..roi_heads import build_roi_heads\nfrom .build import META_ARCH_REGISTRY\nfrom .semantic_seg import build_sem_seg_head\n\n__all__ = [\"PanopticFPN\"]\n\n\n@META_ARCH_REGISTRY.register()\nclass PanopticFPN(nn.Module):\n \"\"\"\n Main class for Panoptic FPN architectures (see https://arxiv.org/abd/1901.02446).\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n\n self.device = torch.device(cfg.MODEL.DEVICE)\n\n self.instance_loss_weight = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT\n\n # options when combining instance & semantic outputs\n self.combine_on = cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED\n self.combine_overlap_threshold = cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH\n self.combine_stuff_area_limit = cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT\n self.combine_instances_confidence_threshold = (\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH\n )\n\n self.backbone = build_backbone(cfg)\n self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())\n self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())\n self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape())\n\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper`.\n Each item in the list contains the inputs for one image.\n\n For now, each item in the list is a dict that contains:\n image: Tensor, image in (C, H, W) format.\n instances: Instances\n sem_seg: semantic segmentation ground truth.\n Other information that's included in the original dicts, such as:\n \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n list[dict]: each dict is the results for one image. 
The dict\n contains the following keys:\n \"instances\": see :meth:`GeneralizedRCNN.forward` for its format.\n \"sem_seg\": see :meth:`SemanticSegmentor.forward` for its format.\n \"panoptic_seg\": available when `PANOPTIC_FPN.COMBINE.ENABLED`.\n See the return value of\n :func:`combine_semantic_and_instance_outputs` for its format.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(x) for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n features = self.backbone(images.tensor)\n\n if \"proposals\" in batched_inputs[0]:\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n proposal_losses = {}\n\n if \"sem_seg\" in batched_inputs[0]:\n gt_sem_seg = [x[\"sem_seg\"].to(self.device) for x in batched_inputs]\n gt_sem_seg = ImageList.from_tensors(\n gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value\n ).tensor\n else:\n gt_sem_seg = None\n sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg)\n\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n if self.proposal_generator:\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n detector_results, detector_losses = self.roi_heads(\n images, features, proposals, gt_instances\n )\n\n if self.training:\n losses = {}\n losses.update(sem_seg_losses)\n losses.update({k: v * self.instance_loss_weight for k, v in detector_losses.items()})\n losses.update(proposal_losses)\n return losses\n\n processed_results = []\n for sem_seg_result, detector_result, input_per_image, image_size in zip(\n sem_seg_results, detector_results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)\n detector_r = detector_postprocess(detector_result, height, width)\n\n processed_results.append({\"sem_seg\": sem_seg_r, \"instances\": detector_r})\n\n if self.combine_on:\n panoptic_r = combine_semantic_and_instance_outputs(\n detector_r,\n sem_seg_r.argmax(dim=0),\n self.combine_overlap_threshold,\n self.combine_stuff_area_limit,\n self.combine_instances_confidence_threshold,\n )\n processed_results[-1][\"panoptic_seg\"] = panoptic_r\n return processed_results\n\n\ndef combine_semantic_and_instance_outputs(\n instance_results,\n semantic_results,\n overlap_threshold,\n stuff_area_limit,\n instances_confidence_threshold,\n):\n \"\"\"\n Implement a simple combining logic following\n \"combine_semantic_and_instance_predictions.py\" in panopticapi\n to produce panoptic segmentation outputs.\n\n Args:\n instance_results: output of :func:`detector_postprocess`.\n semantic_results: an (H, W) tensor, each is the contiguous semantic\n category id\n\n Returns:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.\n segments_info (list[dict]): Describe each segment in `panoptic_seg`.\n Each dict contains keys \"id\", \"category_id\", \"isthing\".\n \"\"\"\n panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32)\n\n # sort instance outputs by scores\n sorted_inds = torch.argsort(-instance_results.scores)\n\n current_segment_id = 0\n segments_info = []\n\n instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device)\n\n # Add instances one-by-one, 
check for overlaps with existing ones\n for inst_id in sorted_inds:\n score = instance_results.scores[inst_id].item()\n if score < instances_confidence_threshold:\n break\n mask = instance_masks[inst_id] # H,W\n mask_area = mask.sum().item()\n\n if mask_area == 0:\n continue\n\n intersect = (mask > 0) & (panoptic_seg > 0)\n intersect_area = intersect.sum().item()\n\n if intersect_area * 1.0 / mask_area > overlap_threshold:\n continue\n\n if intersect_area > 0:\n mask = mask & (panoptic_seg == 0)\n\n current_segment_id += 1\n panoptic_seg[mask] = current_segment_id\n segments_info.append(\n {\n \"id\": current_segment_id,\n \"isthing\": True,\n \"score\": score,\n \"category_id\": instance_results.pred_classes[inst_id].item(),\n \"instance_id\": inst_id.item(),\n }\n )\n\n # Add semantic results to remaining empty areas\n semantic_labels = torch.unique(semantic_results).cpu().tolist()\n for semantic_label in semantic_labels:\n if semantic_label == 0: # 0 is a special \"thing\" class\n continue\n mask = (semantic_results == semantic_label) & (panoptic_seg == 0)\n mask_area = mask.sum().item()\n if mask_area < stuff_area_limit:\n continue\n\n current_segment_id += 1\n panoptic_seg[mask] = current_segment_id\n segments_info.append(\n {\n \"id\": current_segment_id,\n \"isthing\": False,\n \"category_id\": semantic_label,\n \"area\": mask_area,\n }\n )\n\n return panoptic_seg, segments_info\n" ]
[ [ "torch.Tensor", "torch.zeros_like", "torch.unique", "torch.device", "torch.argsort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fmi-basel/zinneretal-methods
[ "c0d9d81bf5e9153c9a97729c217b5bf039625001" ]
[ "goid/training/training.py" ]
[ "import luigi\nimport os\nimport logging\nimport abc\nimport numpy as np\nfrom multiprocessing import Lock, Manager\n\nfrom .augmentations import pad_to_minsize, AugmentationMixin\nfrom .model import BuildModelMixin\n\n\ndef plot_instance_dataset(path, tf_dataset, n_samples=10):\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_pdf import PdfPages\n import numpy as np\n import inter_view.color\n\n def create_single_page(raw, y_true):\n\n zslice = None\n splots = {'raw': raw.numpy().squeeze()}\n\n for key, val in y_true.items():\n if key in ['embeddings', 'binary_seg']:\n splots[key] = val.numpy().squeeze().astype(np.int16)\n\n elif key == 'semantic_class':\n one_hot_classes = val.numpy()\n classes = np.argmax(one_hot_classes,\n axis=-1) + one_hot_classes.sum(axis=-1) - 1\n splots[key] = classes.astype(np.int16)\n\n elif key == 'separator':\n splots[key] = val.numpy().squeeze().astype(np.float32)\n\n # special case, raw input for separator prediction has 4 channels, only show the mean\n splots['raw'] = splots['raw'].mean(axis=-1)\n\n else:\n raise KeyError(\n 'unrecognized dataset key: {}, expected [\"embeddings\", \"binary_seg\", \"semantic_class\"]'\n .format(key))\n\n if splots['raw'].ndim >= 3:\n zslice = splots['raw'].shape[0] // 2\n\n for key in splots.keys():\n splots[key] = splots[key][zslice]\n\n fig, axs = plt.subplots(1, len(splots), figsize=(len(splots) * 6, 6))\n for ax, (key, val) in zip(axs.flat, splots.items()):\n if key == 'raw':\n ax.imshow(val, cmap='Greys_r')\n elif key == 'separator':\n ax.imshow(val, cmap='viridis')\n else:\n ax.imshow(val,\n cmap='blk_glasbey_hv',\n interpolation='nearest',\n vmin=-1,\n vmax=254)\n\n ax.set_xlabel(key)\n ax.set_title('Min: {:4.1f}, Max: {:4.1f}'.format(\n val.min(), val.max()))\n\n plt.tight_layout()\n\n with PdfPages(path) as pdf:\n for raw, y_true in tf_dataset.unbatch().take(n_samples):\n create_single_page(raw, y_true)\n pdf.savefig(bbox_inches='tight')\n plt.close()\n\n\nclass TrainingMixin:\n '''Adds for default training parameters as luigi task parameters\n and provides the common_callbacks() method.\n\n '''\n\n # yapf: disable\n traindir = luigi.Parameter(description='training base directory.')\n train_batch_size = luigi.IntParameter(description='training batch size.')\n valid_batch_size = luigi.IntParameter(description='validation batch size.')\n epochs = luigi.IntParameter(description='number of training epochs.')\n lr_min = luigi.FloatParameter(description='minimum learning rate.')\n lr_max = luigi.FloatParameter(description='maximum learning rate.')\n epoch_to_restart_growth = luigi.FloatParameter(2., description='growth factor of the number of epochs after each restart')\n n_restarts = luigi.IntParameter(1, description='number of restarts for the cosine annealing scheduler')\n patience = luigi.IntParameter(200, description='number of epochs without improvment to wait before stopping')\n patch_size = luigi.TupleParameter(description='training patch size')\n resume_weights = luigi.OptionalParameter(None, description='path to weigths used to resume training')\n plot_dataset = luigi.BoolParameter(True, description='plot samples from from the training set to pdf at the beginning of training', parsing=luigi.BoolParameter.EXPLICIT_PARSING)\n # yapf: enable\n\n def common_callbacks(self, output_folder):\n '''creates several keras callbacks to be used in model.fit or\n model.fit_generator.\n \n '''\n\n from dlutils.training.callbacks import ModelConfigSaver\n from tensorflow.keras.callbacks import 
ModelCheckpoint\n from tensorflow.keras.callbacks import TensorBoard\n from tensorflow.keras.callbacks import LearningRateScheduler\n from tensorflow.keras.callbacks import TerminateOnNaN\n from tensorflow.keras.callbacks import EarlyStopping\n\n from dlutils.training.scheduler import CosineAnnealingSchedule\n\n n_restarts_factor = sum(self.epoch_to_restart_growth**x\n for x in range(self.n_restarts))\n\n epochs_to_restart = (self.epochs + 1) / n_restarts_factor\n if epochs_to_restart < 1:\n raise ValueError(\n 'Initial epoch_to_restart ({}) < 1. Decrease n_restarts ({}) or epoch_to_restart_growth ({})'\n .format(epochs_to_restart, self.n_restarts,\n self.epoch_to_restart_growth))\n\n epochs_to_restart = int(np.ceil(epochs_to_restart))\n\n callbacks = []\n\n if self.lr_max != self.lr_min:\n callbacks.append(\n LearningRateScheduler(\n CosineAnnealingSchedule(\n lr_max=self.lr_max,\n lr_min=self.lr_min,\n epoch_max=epochs_to_restart,\n epoch_max_growth=self.epoch_to_restart_growth,\n reset_decay=1.)))\n\n callbacks.extend([\n TerminateOnNaN(),\n TensorBoard(os.path.join(output_folder, 'tensorboard-logs'),\n write_graph=True,\n write_grads=False,\n write_images=False,\n histogram_freq=0),\n ModelCheckpoint(os.path.join(output_folder, 'weights_best.h5'),\n save_best_only=True,\n save_weights_only=True),\n ModelCheckpoint(os.path.join(output_folder, 'weights_latest.h5'),\n save_best_only=False,\n save_weights_only=True),\n ])\n\n if self.patience is not None and self.patience >= 1:\n callbacks.append(EarlyStopping(patience=self.patience))\n\n return callbacks\n\n @abc.abstractmethod\n def split_samples(self, data):\n '''Function to apply after data augmentation to get training input/output pair'''\n\n pass\n\n\n# NOTE alternatively check actual usage with nvidia-smi, (e.g. nvgpu python wrapper)\n# (tensorflow would have to be initialized and allocate memory before releasing lock)\nclass GPUTask(luigi.Task):\n\n _lock = Lock()\n _used_gpus = Manager().dict()\n\n resources = {'gpu_task': 1}\n\n def _acquire_gpu(self):\n\n import tensorflow as tf\n physical_devices = tf.config.list_physical_devices('GPU')\n\n self.gpu_idx = -1\n gpu_device = []\n with GPUTask._lock:\n for idx, device in enumerate(physical_devices):\n if not GPUTask._used_gpus.get(idx, False):\n GPUTask._used_gpus[idx] = True\n self.gpu_idx = idx\n gpu_device = [device]\n break\n\n if self.gpu_idx < 0:\n raise RuntimeError(\n 'no available GPU found. 
Check that luigi resources \"gpu_task\" matches the number of physical GPU'\n )\n # TODO try get \"gpu_task\" from luigi config and compare to number of available gpu\n # log warning instead and attempt to run on cpu?\n\n # print('Placing on GPU {}'.format(self.gpu_idx))\n tf.config.set_visible_devices(gpu_device, 'GPU')\n\n # to be able to estimate VRAM usage with nvidia-smi\n tf.config.experimental.set_memory_growth(\n physical_devices[self.gpu_idx], True)\n\n def _release_gpu(self):\n if hasattr(self, 'gpu_idx'):\n with GPUTask._lock:\n GPUTask._used_gpus[self.gpu_idx] = False\n\n def run(self):\n self._acquire_gpu()\n self.gpu_run()\n self._release_gpu()\n\n def on_failure(self, exception):\n self._release_gpu()\n return super().on_failure(exception)\n\n @abc.abstractmethod\n def gpu_run(self):\n pass\n\n\nclass JaccardLossParams(luigi.Config):\n jaccard_hinge = luigi.FloatParameter(\n 0.3, description='lower hinge for binary Jaccard loss')\n jaccard_eps = luigi.FloatParameter(\n 1., description='epsilon/smoothing parameter for binary Jaccard loss')\n\n\nclass ModelFittingBaseTask(GPUTask, BuildModelMixin, AugmentationMixin,\n TrainingMixin):\n '''\n\n '''\n foreground_weight = luigi.FloatParameter(default=1)\n draw_dataset = luigi.BoolParameter(\n True,\n description=\n 'plot samples from from the training set to pdf at the beginning of training',\n parsing=luigi.BoolParameter.EXPLICIT_PARSING)\n\n def output(self):\n return luigi.LocalTarget(os.path.join(self.traindir, self.model_name))\n\n @abc.abstractmethod\n def get_training_losses(self):\n '''Function to apply after data augmentation to get training input/output pair'''\n\n pass\n\n @abc.abstractmethod\n def _get_parser_fun(self):\n '''Returns a record parser function'''\n pass\n\n def gpu_run(self):\n\n import tensorflow as tf\n from dlutils.dataset.dataset import create_dataset\n\n with self.output().temporary_path() as model_dir:\n logger = logging.getLogger('luigi-interface')\n logger.info('Starting training model: {}'.format(model_dir))\n\n augmentations = self.get_augmentations() + [self.split_samples]\n\n trainset = create_dataset(self.input()['train'].path,\n batch_size=self.train_batch_size,\n parser_fn=self._get_parser_fun(),\n transforms=augmentations,\n shuffle_buffer=500,\n shuffle=True,\n drop_remainder=False,\n cache_after_parse=False,\n patch_size=self.patch_size)\n\n validset = create_dataset(self.input()['valid'].path,\n batch_size=self.valid_batch_size,\n parser_fn=self._get_parser_fun(),\n transforms=[self.split_samples],\n drop_remainder=False,\n cache_after_parse=False,\n patch_size=self.patch_size)\n\n model = self.construct_model()\n model.save(model_dir)\n\n if self.plot_dataset:\n logger = logging.getLogger('luigi-interface')\n logger.info('plotting nuclei training examples to pdf')\n plot_instance_dataset(\n os.path.join(model_dir, 'training_samples.pdf'), trainset,\n 100)\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr_max),\n loss=self.get_training_losses(),\n metrics=None)\n\n if self.resume_weights:\n model.load_weights(self.resume_weights)\n\n history = model.fit(trainset,\n validation_data=validset,\n epochs=self.epochs,\n callbacks=self.common_callbacks(model_dir))\n\n\nclass InferenceModelExportBaseTask(luigi.Task):\n def run(self):\n import tensorflow as tf\n\n model = tf.keras.models.load_model(self.input().path, compile=False)\n model.load_weights(os.path.join(self.input().path,\n 'weights_latest.h5'))\n\n with self.output().temporary_path() as 
temp_output_path:\n tf.saved_model.save(model,\n export_dir=temp_output_path,\n signatures=self.serve_signatures(model))\n\n def serve_signatures(self, model):\n return {}\n\n def output(self):\n return luigi.LocalTarget(self.input().path + '_inference')\n" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.tight_layout", "tensorflow.config.experimental.set_memory_growth", "numpy.ceil", "tensorflow.keras.optimizers.Adam", "tensorflow.config.list_physical_devices", "tensorflow.keras.callbacks.TerminateOnNaN", "matplotlib.pyplot.close", "numpy.argmax", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.config.set_visible_devices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
apyskir/steppy-toolkit
[ "3190054954aeab043ced1c079d87bdd3582bb232" ]
[ "toolkit/pytorch_transformers/callbacks.py" ]
[ "import os\nfrom datetime import datetime, timedelta\n\nfrom deepsense import neptune\nfrom steppy.utils import get_logger\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nfrom toolkit.pytorch_transformers.utils import Averager, persist_torch_model\nfrom toolkit.pytorch_transformers.validation import score_model\n\nlogger = get_logger()\n\n\nclass Callback:\n def __init__(self):\n self.epoch_id = None\n self.batch_id = None\n\n self.model = None\n self.optimizer = None\n self.loss_function = None\n self.output_names = None\n self.validation_datagen = None\n self.lr_scheduler = None\n\n def set_params(self, transformer, validation_datagen):\n self.model = transformer.model\n self.optimizer = transformer.optimizer\n self.loss_function = transformer.loss_function\n self.output_names = transformer.output_names\n self.validation_datagen = validation_datagen\n self.validation_loss = transformer.validation_loss\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_id = 0\n self.batch_id = 0\n\n def on_train_end(self, *args, **kwargs):\n pass\n\n def on_epoch_begin(self, *args, **kwargs):\n pass\n\n def on_epoch_end(self, *args, **kwargs):\n self.epoch_id += 1\n\n def training_break(self, *args, **kwargs):\n return False\n\n def on_batch_begin(self, *args, **kwargs):\n pass\n\n def on_batch_end(self, *args, **kwargs):\n self.batch_id += 1\n\n def get_validation_loss(self):\n if self.validation_loss is None:\n self.validation_loss = {}\n return self.validation_loss.setdefault(self.epoch_id, score_model(self.model,\n self.loss_function,\n self.validation_datagen))\n\n\nclass CallbackList:\n def __init__(self, callbacks=None):\n if callbacks is None:\n self.callbacks = []\n elif isinstance(callbacks, Callback):\n self.callbacks = [callbacks]\n else:\n self.callbacks = callbacks\n\n def __len__(self):\n return len(self.callbacks)\n\n def set_params(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.set_params(*args, **kwargs)\n\n def on_train_begin(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_train_begin(*args, **kwargs)\n\n def on_train_end(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_train_end(*args, **kwargs)\n\n def on_epoch_begin(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_epoch_begin(*args, **kwargs)\n\n def on_epoch_end(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_epoch_end(*args, **kwargs)\n\n def training_break(self, *args, **kwargs):\n callback_out = [callback.training_break(*args, **kwargs) for callback in self.callbacks]\n return any(callback_out)\n\n def on_batch_begin(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_batch_begin(*args, **kwargs)\n\n def on_batch_end(self, *args, **kwargs):\n for callback in self.callbacks:\n callback.on_batch_end(*args, **kwargs)\n\n\nclass TrainingMonitor(Callback):\n def __init__(self, epoch_every=None, batch_every=None):\n super().__init__()\n self.epoch_loss_averagers = {}\n if epoch_every == 0:\n self.epoch_every = False\n else:\n self.epoch_every = epoch_every\n if batch_every == 0:\n self.batch_every = False\n else:\n self.batch_every = batch_every\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_loss_averagers = {}\n self.epoch_id = 0\n self.batch_id = 0\n\n def on_epoch_end(self, *args, **kwargs):\n for name, averager in self.epoch_loss_averagers.items():\n epoch_avg_loss = averager.value\n averager.reset()\n if self.epoch_every and ((self.epoch_id % self.epoch_every) == 
0):\n logger.info('epoch {0} {1}: {2:.5f}'.format(self.epoch_id, name, epoch_avg_loss))\n self.epoch_id += 1\n\n def on_batch_end(self, metrics, *args, **kwargs):\n for name, loss in metrics.items():\n loss = loss.data.cpu().numpy()[0]\n if name in self.epoch_loss_averagers.keys():\n self.epoch_loss_averagers[name].send(loss)\n else:\n self.epoch_loss_averagers[name] = Averager()\n self.epoch_loss_averagers[name].send(loss)\n\n if self.batch_every and ((self.batch_id % self.batch_every) == 0):\n logger.info('epoch {0} batch {1} {2}: {3:.5f}'.format(self.epoch_id, self.batch_id, name, loss))\n self.batch_id += 1\n\n\nclass ValidationMonitor(Callback):\n def __init__(self, epoch_every=None, batch_every=None):\n super().__init__()\n if epoch_every == 0:\n self.epoch_every = False\n else:\n self.epoch_every = epoch_every\n if batch_every == 0:\n self.batch_every = False\n else:\n self.batch_every = batch_every\n\n def on_epoch_end(self, *args, **kwargs):\n if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):\n self.model.eval()\n val_loss = self.get_validation_loss()\n self.model.train()\n for name, loss in val_loss.items():\n loss = loss.data.cpu().numpy()[0]\n logger.info('epoch {0} validation {1}: {2:.5f}'.format(self.epoch_id, name, loss))\n self.epoch_id += 1\n\n\nclass EarlyStopping(Callback):\n def __init__(self, patience, minimize=True):\n super().__init__()\n self.patience = patience\n self.minimize = minimize\n self.best_score = None\n self.epoch_since_best = 0\n\n def training_break(self, *args, **kwargs):\n self.model.eval()\n val_loss = self.get_validation_loss()\n loss_sum = val_loss['sum']\n loss_sum = loss_sum.data.cpu().numpy()[0]\n\n self.model.train()\n\n if not self.best_score:\n self.best_score = loss_sum\n\n if (self.minimize and loss_sum < self.best_score) or (not self.minimize and loss_sum > self.best_score):\n self.best_score = loss_sum\n self.epoch_since_best = 0\n else:\n self.epoch_since_best += 1\n\n if self.epoch_since_best > self.patience:\n return True\n else:\n return False\n\n\nclass ExponentialLRScheduler(Callback):\n def __init__(self, gamma, epoch_every=1, batch_every=None):\n super().__init__()\n self.gamma = gamma\n if epoch_every == 0:\n self.epoch_every = False\n else:\n self.epoch_every = epoch_every\n if batch_every == 0:\n self.batch_every = False\n else:\n self.batch_every = batch_every\n\n def set_params(self, transformer, validation_datagen):\n self.validation_datagen = validation_datagen\n self.model = transformer.model\n self.optimizer = transformer.optimizer\n self.loss_function = transformer.loss_function\n self.lr_scheduler = ExponentialLR(self.optimizer, self.gamma, last_epoch=-1)\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_id = 0\n self.batch_id = 0\n logger.info('initial lr: {0}'.format(self.optimizer.state_dict()['param_groups'][0]['initial_lr']))\n\n def on_epoch_end(self, *args, **kwargs):\n if self.epoch_every and (((self.epoch_id + 1) % self.epoch_every) == 0):\n self.lr_scheduler.step()\n logger.info('epoch {0} current lr: {1}'.format(self.epoch_id + 1,\n self.optimizer.state_dict()['param_groups'][0]['lr']))\n self.epoch_id += 1\n\n def on_batch_end(self, *args, **kwargs):\n if self.batch_every and ((self.batch_id % self.batch_every) == 0):\n self.lr_scheduler.step()\n logger.info('epoch {0} batch {1} current lr: {2}'.format(\n self.epoch_id + 1, self.batch_id + 1, self.optimizer.state_dict()['param_groups'][0]['lr']))\n self.batch_id += 1\n\n\nclass ModelCheckpoint(Callback):\n def __init__(self, 
filepath, epoch_every=1, minimize=True):\n super().__init__()\n self.filepath = filepath\n self.minimize = minimize\n self.best_score = None\n\n if epoch_every == 0:\n self.epoch_every = False\n else:\n self.epoch_every = epoch_every\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_id = 0\n self.batch_id = 0\n os.makedirs(os.path.dirname(self.filepath), exist_ok=True)\n\n def on_epoch_end(self, *args, **kwargs):\n if self.epoch_every and ((self.epoch_id % self.epoch_every) == 0):\n self.model.eval()\n val_loss = self.get_validation_loss()\n loss_sum = val_loss['sum']\n loss_sum = loss_sum.data.cpu().numpy()[0]\n\n self.model.train()\n\n if not self.best_score:\n self.best_score = loss_sum\n\n if (self.minimize and loss_sum < self.best_score) or (not self.minimize and loss_sum > self.best_score) or (\n self.epoch_id == 0):\n self.best_score = loss_sum\n persist_torch_model(self.model, self.filepath)\n logger.info('epoch {0} model persisted to {1}'.format(self.epoch_id, self.filepath))\n\n self.epoch_id += 1\n\n\nclass NeptuneMonitor(Callback):\n def __init__(self, model_name):\n super().__init__()\n self.model_name = model_name\n self.ctx = neptune.Context()\n self.epoch_loss_averager = Averager()\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_loss_averagers = {}\n self.epoch_id = 0\n self.batch_id = 0\n\n def on_batch_end(self, metrics, *args, **kwargs):\n for name, loss in metrics.items():\n loss = loss.data.cpu().numpy()[0]\n\n if name in self.epoch_loss_averagers.keys():\n self.epoch_loss_averagers[name].send(loss)\n else:\n self.epoch_loss_averagers[name] = Averager()\n self.epoch_loss_averagers[name].send(loss)\n\n self.ctx.channel_send('{} batch {} loss'.format(self.model_name, name), x=self.batch_id, y=loss)\n\n self.batch_id += 1\n\n def on_epoch_end(self, *args, **kwargs):\n self._send_numeric_channels()\n self.epoch_id += 1\n\n def _send_numeric_channels(self, *args, **kwargs):\n for name, averager in self.epoch_loss_averagers.items():\n epoch_avg_loss = averager.value\n averager.reset()\n self.ctx.channel_send('{} epoch {} loss'.format(self.model_name, name), x=self.epoch_id, y=epoch_avg_loss)\n\n self.model.eval()\n val_loss = self.get_validation_loss()\n self.model.train()\n for name, loss in val_loss.items():\n loss = loss.data.cpu().numpy()[0]\n self.ctx.channel_send('{} epoch_val {} loss'.format(self.model_name, name), x=self.epoch_id, y=loss)\n\n\nclass ExperimentTiming(Callback):\n def __init__(self, epoch_every=None, batch_every=None):\n super().__init__()\n if epoch_every == 0:\n self.epoch_every = False\n else:\n self.epoch_every = epoch_every\n if batch_every == 0:\n self.batch_every = False\n else:\n self.batch_every = batch_every\n self.batch_start = None\n self.epoch_start = None\n self.current_sum = None\n self.current_mean = None\n\n def on_train_begin(self, *args, **kwargs):\n self.epoch_id = 0\n self.batch_id = 0\n logger.info('starting training...')\n\n def on_train_end(self, *args, **kwargs):\n logger.info('training finished')\n\n def on_epoch_begin(self, *args, **kwargs):\n if self.epoch_id > 0:\n epoch_time = datetime.now() - self.epoch_start\n if self.epoch_every:\n if (self.epoch_id % self.epoch_every) == 0:\n logger.info('epoch {0} time {1}'.format(self.epoch_id - 1, str(epoch_time)[:-7]))\n self.epoch_start = datetime.now()\n self.current_sum = timedelta()\n self.current_mean = timedelta()\n logger.info('epoch {0} ...'.format(self.epoch_id))\n\n def on_batch_begin(self, *args, **kwargs):\n if self.batch_id > 0:\n current_delta = 
datetime.now() - self.batch_start\n self.current_sum += current_delta\n self.current_mean = self.current_sum / self.batch_id\n if self.batch_every:\n if self.batch_id > 0 and (((self.batch_id - 1) % self.batch_every) == 0):\n logger.info('epoch {0} average batch time: {1}'.format(self.epoch_id, str(self.current_mean)[:-5]))\n if self.batch_every:\n if self.batch_id == 0 or self.batch_id % self.batch_every == 0:\n logger.info('epoch {0} batch {1} ...'.format(self.epoch_id, self.batch_id))\n self.batch_start = datetime.now()\n\n\nclass ReduceLROnPlateau(Callback): # thank you keras\n def __init__(self):\n super().__init__()\n pass\n" ]
[ [ "torch.optim.lr_scheduler.ExponentialLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MendelXu/pytorch_geometric
[ "676972f32507dbdec16c4a4ab46166032e5bfc04" ]
[ "test/nn/models/test_jumping_knowledge.py" ]
[ "import torch\nfrom torch_geometric.nn import JumpingKnowledge\n\n\ndef test_jumping_knowledge():\n num_nodes, channels, num_layers = 100, 16, 4\n xs = list([torch.randn(num_nodes, channels) for _ in range(num_layers)])\n\n model = JumpingKnowledge('cat')\n assert model.__repr__() == 'JumpingKnowledge(cat)'\n assert model(xs).size() == (num_nodes, channels * num_layers)\n\n model = JumpingKnowledge('max')\n assert model.__repr__() == 'JumpingKnowledge(max)'\n assert model(xs).size() == (num_nodes, channels)\n\n model = JumpingKnowledge('lstm', channels, num_layers)\n assert model.__repr__() == 'JumpingKnowledge(lstm)'\n assert model(xs).size() == (num_nodes, channels)\n" ]
[ [ "torch.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndryRafam/Hate_Speech
[ "f3350a77fcbdb7a9b5845a5ba71470f2540cd000" ]
[ "GRU/gru.py" ]
[ "import pickle\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport re\nimport nltk\nimport string\nimport random\n\nfrom zipfile import ZipFile\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nfrom tensorflow.keras.layers import Embedding, GRU, Dense\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.models import load_model\n\nrandom.seed(42)\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nclass Hate_Speech():\n\t\n\tdef unzip(self,nm):\n\t\twith ZipFile(nm,\"r\") as zip:\n\t\t\tzip.extractall()\n\n\tdef preprocess(self,y):\n\t\tself.x = y.lower()\n\t\tself.x = self.x.encode(\"ascii\",\"ignore\").decode()\n\t\tself.x = re.sub(\"https*\\S+\",\" \",self.x)\n\t\tself.x = re.sub(\"@\\S+\",\" \",self.x)\n\t\tself.x = re.sub(\"#\\S+\",\" \",self.x)\n\t\tself.x = re.sub(\"\\'\\w+\",\"\",self.x)\n\t\tself.x = re.sub(\"[%s]\" % re.escape(string.punctuation),\" \", self.x)\n\t\tself.x = re.sub(\"\\w*\\d+\\w*\",\"\",self.x)\n\t\tself.x = re.sub(\"\\s{2,}\",\" \",self.x)\n\t\treturn self.x\n\n\tdef tokenize(self,y):\n\t\tfor self.x in y:\n\t\t\tyield(word_tokenize(str(self.x)))\n\n\tdef detokenize(self,txt):\n\t\treturn TreebankWordDetokenizer().detokenize(txt)\n\n\tdef model(self,inputs):\n\t\tself.x = Embedding(max_words,128)(inputs)\n\t\tself.x = GRU(64,return_sequences=True)(self.x)\n\t\tself.x = GRU(64)(self.x)\n\t\tself.outputs = Dense(3,activation=\"softmax\")(self.x)\n\t\tself.model = Model(inputs,self.outputs)\n\t\treturn self.model\n\nht = Hate_Speech()\n\t\nht.unzip(\"archive.zip\")\ndf = pd.read_csv(\"hate_speech.csv\")\ntemp = []\ndata_to_list = df[\"tweet\"].values.tolist()\nfor i in range(len(data_to_list)):\n\ttemp.append(ht.preprocess(data_to_list[i]))\n\t\t\t\ndata_words = list(ht.tokenize(temp))\n\t\nfinal_data = []\nfor i in range(len(data_words)):\n\tfinal_data.append(ht.detokenize(data_words[i]))\nprint(final_data[:5])\nfinal_data = np.array(final_data)\n\nmax_words = 20000\nmax_len = 200\n\ntokenizer = Tokenizer(num_words = max_words)\ntokenizer.fit_on_texts(final_data)\nsequences = tokenizer.texts_to_sequences(final_data)\ntweets = pad_sequences(sequences,maxlen=max_len)\nwith open(\"tokenizer.pickle\",\"wb\") as handle:\n\tpickle.dump(tokenizer,handle,protocol=pickle.HIGHEST_PROTOCOL)\nprint(tweets)\n\nlabels = df[\"class\"]\n\nx_train,x_test,y_train,y_test = train_test_split(tweets,labels,random_state=42)\nx_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.25,random_state=42)\n\nmodel = ht.model(Input(shape=(None,),dtype=\"int32\"))\nmodel.summary()\nmodel.compile(Adam(),SparseCategoricalCrossentropy(),metrics=[\"accuracy\"])\n\ncheckpoint = ModelCheckpoint(\"hate_speech.h5\",monitor=\"val_accuracy\",save_best_only=True,save_weights_only=False)\nmodel.fit(x_train,y_train,batch_size=32,epochs=5,validation_data=(x_val,y_val),callbacks=[checkpoint])\nbest = load_model(\"hate_speech.h5\")\nloss,acc = best.evaluate(x_test,y_test)\nprint(\"\\nTest acc: {:.2f} %\".format(100*acc))\nprint(\"Test loss: {:.2f} %\".format(100*loss))" ]
[ [ "tensorflow.keras.preprocessing.text.Tokenizer", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.models.load_model", "pandas.read_csv", "tensorflow.keras.Input", "numpy.random.seed", "tensorflow.keras.layers.Embedding", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split", "tensorflow.keras.Model", "tensorflow.keras.layers.GRU", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.preprocessing.sequence.pad_sequences", "numpy.array", "tensorflow.random.set_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
LeeZChuan/DTVis-master
[ "397f99c63c7746986962d00a7b2ebe012f3d88c8" ]
[ "static/script/pythonCode/address_data.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: LeeZChuan\n\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport requests\nimport os\nfrom pandas.core.frame import DataFrame\n\npd.set_option('display.max_columns',1000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth',1000)\n\ndef read_csv(filename):\n subfile = 'datasets2/'\n name_title = filename[:-3]\n fileInfo = subfile+filename\n# raw_train_df = pd.read_csv(fileInfo, sep='\\s+', engine='python').loc[:,[name_title+'arrive_time',name_title+'starting_lng',name_title+'starting_lat',name_title+'dest_lng',name_title+'dest_lat']]\n raw_train_df = pd.read_csv(fileInfo, sep='\\s+', engine='python')\n return raw_train_df\n\n\ndef locatebyLatLng(lat, lng, pois=0):\n '''\n 根据经纬度查询地址\n '''\n items = {'location': str(lat) + ',' + str(lng), 'ak': 'fFYPupo84cWdICPUbxXkb9GYww0VYD5Y', 'output': 'json'}\n res = requests.get('http://api.map.baidu.com/geocoder/v2/', params=items)\n result = res.json()\n# print(result)\n# print('--------------------------------------------')\n #result = result['result']['formatted_address'] + ',' + result['result']['sematic_description']\n city = result['result']['addressComponent']['city']\n district = result['result']['addressComponent']['district']\n street = result['result']['addressComponent']['street']\n return city, district, street\n\n\n\ndef groupByDay(raw_df):\n groupedByDate = raw_df.groupby([name_title+'arrive_time'])\n\n \n for group_name, group_data in groupedByDate:\n data_one_date = []\n date = str(group_name)\n print (\"dealing-------\"+date)\n print (group_data.shape)\n for index, item in group_data.iterrows():\n data_each_line = [] \n data_each_line.append(item[name_title+'starting_lng'])\n data_each_line.append(item[name_title+'starting_lat'])\n data_each_line.append(item[name_title+'dest_lng'])\n data_each_line.append(item[name_title+'dest_lat'])\n data_one_date.append(data_each_line)\n\n df = pd.DataFrame(data_one_date, columns=['starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']) \n fileInfo = 'datasets/'+date+'.txt'\n df.to_csv(fileInfo, encoding='utf_8_sig')\n \n\ndef groupByDay2(raw_df):\n groupedByDate = raw_df.groupby([name_title+'arrive_time'])\n\n \n for group_name, group_data in groupedByDate:\n data_one_date = []\n date = str(group_name)\n print (\"dealing-------\"+date)\n print (group_data.shape)\n for index, item in group_data.iterrows():\n data_each_line = []\n data_each_line.append(item[name_title+'order_id'])\n data_each_line.append(item[name_title+'product_id'])\n data_each_line.append(item[name_title+'type'])\n data_each_line.append(item[name_title+'combo_type'])\n data_each_line.append(item[name_title+'traffic_type'])\n data_each_line.append(item[name_title+'passenger_count'])\n data_each_line.append(item[name_title+'driver_product_id'])\n data_each_line.append(item[name_title+'start_dest_distance'])\n data_each_line.append(item[name_title+'arrive_time'])\n data_each_line.append(item[name_title+'departure_time'])\n data_each_line.append(item[name_title+'pre_total_fee'])\n data_each_line.append(item[name_title+'normal_time'])\n data_each_line.append(item[name_title+'bubble_trace_id'])\n data_each_line.append(item[name_title+'product_1level'])\n data_each_line.append(item[name_title+'year'])\n data_each_line.append(item[name_title+'month'])\n data_each_line.append(item[name_title+'day'])\n data_each_line.append(item[name_title+'starting_lng'])\n data_each_line.append(item[name_title+'starting_lat'])\n 
data_each_line.append(item[name_title+'dest_lng'])\n data_each_line.append(item[name_title+'dest_lat'])\n data_one_date.append(data_each_line)\n \n \n\n df = pd.DataFrame(data_one_date, columns=['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']) \n fileInfo = 'datasets/'+date+'.txt'\n df.to_csv(fileInfo, encoding='utf_8_sig') \n \n\ndef groupByDay3(raw_df):\n groupedByDate = raw_df.groupby([name_title+'arrive_time'])\n\n \n for group_name, group_data in groupedByDate:\n date = str(group_name)\n print (\"dealing-------\"+date)\n print (group_data.shape)\n data_one_date = []\n \n for index, item in group_data.iterrows():\n data_each_line = []\n data_each_line.append(item[name_title+'order_id'])\n data_each_line.append(item[name_title+'product_id'])\n data_each_line.append(item[name_title+'city_id'])\n data_each_line.append(item[name_title+'district'])\n data_each_line.append(item[name_title+'county'])\n data_each_line.append(item[name_title+'type'])\n data_each_line.append(item[name_title+'combo_type'])\n data_each_line.append(item[name_title+'traffic_type'])\n data_each_line.append(item[name_title+'passenger_count'])\n data_each_line.append(item[name_title+'driver_product_id'])\n# data_each_line.append(item[name_title+'start_dest_distance'])\n data_each_line.append(item[name_title+'arrive_time'])\n# data_each_line.append(item[name_title+'departure_time'])\n data_each_line.append(item[name_title+'pre_total_fee'])\n data_each_line.append(item[name_title+'normal_time'])\n data_each_line.append(item[name_title+'bubble_trace_id'])\n data_each_line.append(item[name_title+'product_1level'])\n data_each_line.append(item[name_title+'year'])\n data_each_line.append(item[name_title+'month'])\n data_each_line.append(item[name_title+'day'])\n data_each_line.append(item[name_title+'starting_lng'])\n data_each_line.append(item[name_title+'starting_lat'])\n data_each_line.append(item[name_title+'dest_lng'])\n data_each_line.append(item[name_title+'dest_lat'])\n data_one_date.append(data_each_line)\n \n \n\n df = pd.DataFrame(data_one_date, columns=['city_id','district', 'county', 'type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat'])\n fileInfo = 'datasets2/'+date+'.txt'\n df.to_csv(fileInfo, encoding='utf_8_sig') \n \n \n \n\ndef streetNamefind(filename):\n subfile = 'datasets/'+filename[:-1]\n raw_train_files = os.listdir(subfile)\n for eachFile in raw_train_files[1037:]:\n raw_file_df = pd.read_csv(subfile+'/'+eachFile, engine='python').loc[:,['starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']]\n print ('streetNameFind--'+eachFile)\n streetNames = []\n i = 1\n for index, item in raw_file_df.iterrows():\n streetName = []\n _, _, s_street = locatebyLatLng(item['starting_lat'], item['starting_lng'], pois=0)\n streetName.append(s_street)\n _, _, d_street = locatebyLatLng(item['dest_lat'], item['dest_lng'], pois=0)\n streetName.append(d_street)\n print (eachFile, '---', i, ' ---starting--', s_street, 'dest--', d_street)\n streetNames.append(streetName)\n i += 1\n \n df_street = pd.DataFrame(streetNames, columns=['starting_street', 
'dest_street'])\n fileInfo = 'datasets/'+name_title[:-1]+'/'+eachFile[:-4]+'.txt'\n df_street.to_csv(fileInfo, encoding='utf_8_sig') \n\n\ndef fileCut(filename):\n subfile = 'datasets/'+filename[:-1]\n raw_train_files = os.listdir(subfile)\n print (subfile, raw_train_files)\n for eachFile in raw_train_files:\n raw_file_df = pd.read_csv(subfile+'/'+eachFile, engine='python').loc[:,['starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']]\n i = 0\n j = 1\n len_data = raw_file_df.shape[0]\n coordinatesNames = []\n for index, item in raw_file_df.iterrows():\n coordinatesName = []\n coordinatesName.append(item['starting_lng'])\n coordinatesName.append(item['starting_lat'])\n coordinatesName.append(item['dest_lng'])\n coordinatesName.append(item['dest_lat'])\n coordinatesNames.append(coordinatesName)\n i += 1\n if i%30==0 or i==len_data:\n df_coordinate = pd.DataFrame(coordinatesNames, columns=['starting_lng', 'starting_lat', 'dest_lng', 'dest_lat'])\n fileInfo = 'datasets/'+name_title[:-1]+'/'+str(j).zfill(4)+'.txt'\n print ('dealing--' + fileInfo)\n df_coordinate.to_csv(fileInfo, encoding='utf_8_sig') \n coordinatesNames = []\n j += 1\n \n \n\n\n\n\ndef fileMerge(filename):\n subfile = 'datasets/'+filename[:-1]\n raw_train_files = os.listdir(subfile)\n saveFile = subfile+'/all_data.txt'\n i = 1\n for eachFile in raw_train_files:\n filename = subfile+'/'+eachFile\n print (\"processing : \" + filename)\n if i == 1:\n filecon = pd.read_csv(filename, header=0)\n# filecon = pd.read_csv(filename, header=None)\n else:\n filecon = pd.read_csv(filename)\n i += 1\n filecon.to_csv(saveFile, mode='a', index=False, header=False, encoding='utf_8_sig')\n \n\ndef dataSta(filename):\n subfile = 'datasets/'+filename[:-1]\n fileName = subfile+'/all_data.txt'\n raw_file_df = pd.read_csv(fileName, engine='python', encoding='utf_8_sig').loc[:,['starting_street', 'dest_street']]\n raw_file_df.dropna(axis=0, how='any', inplace=True) #删除含有空值的行\n result = raw_file_df.groupby([raw_file_df['starting_street'],raw_file_df['dest_street']])\n \n all_result = []\n name_result = []\n for name, item in result:\n each_result = []\n each_result.append(name[0])\n each_result.append(name[1])\n each_result.append(len(item))\n all_result.append(each_result)\n \n name_result.append(name[0])\n name_result.append(name[1])\n \n\n\n name_df = DataFrame(name_result)\n name_df = name_df[0].unique()\n name_list = name_df.tolist()\n \n print (name_list)\n\n strValue = \"data:[\"\n for item in name_list:\n strValue = strValue+\"{name:'\"+item+\"'},\"\n strValue = strValue[:-1]\n strValue = strValue + \"],\\n\"\n \n strValue = strValue + \"links:[\"\n for item in all_result:\n strValue = strValue+\"{source:'\"+item[0]+\"', target:'\"+item[1]+\"', value:\"+str(item[2])+\"},\"\n \n strValue = strValue[:-1]\n strValue = strValue + \"]\"\n\n \n# path = os.getcwd()+'\\dataForMulberryFigure\\\\'+filename[:-1]+'.txt'\n name_path = os.getcwd()+'\\dataForMulberryFigure\\\\'+filename[:-1]+'_nodes_links.txt'\n# a = open(path, \"w\",encoding='UTF-8')\n# a.write(str(all_result))\n# a.close()\n \n def save(filename, contents):\n fh = open(filename, 'w', encoding='utf-8')\n fh.write(contents)\n fh.close()\n save(name_path, strValue)\n\n\nif __name__ == '__main__':\n filename = \"dwv_order_make_haikou_8.txt\"\n name_title = filename[:-3] \n\n \n \n##按日期划分数据集\n# raw_df = read_csv(filename)\n# \n## print (raw_df.head(2))\n# \n# raw_df[name_title+'arrive_time'] = pd.to_datetime(raw_df[name_title+'arrive_time'],format=\"%Y-%m-%d\").dt.date\n# raw_df = 
raw_df[~(raw_df[name_title+'arrive_time']=='0000-00-00')]\n# groupByDay3(raw_df)\n\n\n#坐标转换 \n# streetNamefind(name_title)\n \n##分批坐标转换\n# 1. 切分文件\n# fileCut(name_title)\n# #2. 坐标转换\n# streetNamefind(name_title)\n #3. 文件合并\n# fileMerge(name_title)\n \n #4. 统计及格式操作\n# dataSta(name_title)\n\n \n \n\n\n\t\t\t\t\t\t\t\t\t" ]
[ [ "pandas.core.frame.DataFrame", "pandas.set_option", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
balarsen/Bridge
[ "258d79558bb9871f2e0e51b69b02aecf6a129ed9" ]
[ "Bridge/Bidding.py" ]
[ "import itertools\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\n\n\"\"\"\nthis module is quite complicated, it will need to both do the mechanics of bidding\nbut also allow for puggable \"smarts\" accordng to different conventions\n\"\"\"\n\n\nclass Redeal(Exception):\n pass\n\n\nclass Bidding_logic(object):\n __metaclass__ = ABCMeta\n\n def __init__(self, hand): # create an instance per bidder\n self.hand = hand\n\n @abstractmethod\n def openBid(self):\n pass\n\n @abstractmethod\n def raiseBid(self, bids):\n pass\n\n @abstractmethod\n def jumpBid(self, bids):\n pass\n\n\nclass Bidding(list):\n def __init__(self, leader='north'):\n self.leader = leader.title()\n _seats = ['North', 'East', 'South', 'West']\n leader_ind = _seats.index(self.leader)\n self._seats = np.roll(['North', 'East', 'South', 'West'], leader_ind).tolist()\n self.nextBidder = itertools.cycle(self._seats)\n self._passCount = 0\n\n def addBid(self, seat, bid):\n if self._passCount >= 4: # done\n raise (ValueError('Four passes have already occured'))\n if len(self) > 0: # new bid mist be larger than the old one\n try:\n old_winner = self._winningBid()[1]\n except TypeError:\n old_winner = None\n if old_winner is not None: # we have another bid\n if not bid > old_winner:\n raise (ValueError('New bid must be higher than old bid'))\n if bid.value == 'pass':\n self._passCount += 1\n else:\n self._passCount = 0\n self.append((seat, bid))\n if self._passCount >= 4: # done\n if not self.opened: # no one opened\n raise (Redeal(\"No one opened\"))\n return self._winningBid()\n else:\n return None\n\n def _winningBid(self):\n \"\"\"\n figure the winning bid\n \"\"\"\n for seat, bid in reversed(self):\n if bid.value != 'pass':\n return (seat, bid)\n\n def nextBid(self, bid):\n \"\"\"\n subset of above, just goes in order\n \"\"\"\n return self.addBid(self._seats[len(self) % 4], bid)\n\n @property\n def opened(self):\n \"\"\"\n return True if someonehas opened, False otherwise\n \"\"\"\n for bid in self:\n if bid[1].value != 'pass':\n return True\n return False\n" ]
[ [ "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NPMP-7/CBLBs
[ "1f9380c275cdb229c186b84bc45b3b63a8a960a9" ]
[ "cblb/run_ODE_NAND_NOR_XOR_extended.py" ]
[ "from scipy.integrate import ode\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\nfrom models_extended import *\nfrom parameters import *\n\n\"\"\"\n[[(S1, I1)], []]\n\"\"\"\n\"\"\"\nstates = [([0,0], [0,0,0,0]), ([0,0], [1,0,0,0]), \n ([1,0], [0,0,0,0]), ([1,0], [0,1,0,0]), \n ([0,1], [0,0,0,0]), ([0,1], [0,0,1,0]), \n ([1,1], [0,0,0,0]), ([1,1], [0,0,0,1])]\n\"\"\"\n\n\"\"\"\nstates = [([0,0], [0,0,0,0]), ([0,0], [1,0,0,0]), \n ([1,0], [1,0,0,0]), ([1,0], [1,1,0,0]), \n ([0,1], [1,1,0,0]), ([0,1], [1,1,1,0]), \n ([1,1], [1,1,1,0]), ([1,1], [1,1,1,1])]\n\n\"\"\"\nstates_null = [([0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0])]\n\nstates_NAND = [([0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 0]),\n ([1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0])]\n\nstates_NOR = [([0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([0, 1, 0], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([1, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([1, 1, 0], [1, 0, 0, 0, 0, 0, 0, 0]),\n ([1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0])]\n\nstates_XOR = [([0, 0, 0], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([0, 0, 1], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([0, 1, 0], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([0, 1, 1], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([1, 0, 0], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([1, 0, 1], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([1, 1, 0], [0, 1, 1, 0, 1, 0, 0, 1]),\n ([1, 1, 1], [0, 1, 1, 0, 1, 0, 0, 1])]\n\nstates = states_null + states_NAND + states_NOR + states_XOR\n\n# simulation parameters (for a single state)\nt_end = 1000\nN = t_end\n\nrho_x = 0\nrho_y = 0\n\n\"\"\"\nrho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 0, 5, 5, 0, 5, 0, 5, 0\n\nparams = (delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, \n rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b)\n\"\"\"\n\n# Y0 = np.zeros(59)\nY0 = np.zeros(128)\n\n# number of cells: toggle switches\nN_I0 = np.array([1, 1])\nN_I1 = np.array([1, 1])\nN_I2 = np.array([1, 1])\nN_I3 = np.array([1, 1])\nN_I4 = np.array([1, 1])\nN_I5 = np.array([1, 1])\nN_I6 = np.array([1, 1])\nN_I7 = np.array([1, 1])\n\nY0[4:6] = N_I0\nY0[10:12] = N_I1\nY0[16:18] = N_I2\nY0[22:24] = N_I3\nY0[28:30] = N_I4\nY0[34:36] = N_I5\nY0[40:42] = N_I6\nY0[46:48] = N_I7\n\n# number of cells: mux\nY0[86:127] = 1 # number of cells\n\n\"\"\"\nsimulations\n\"\"\"\n\nfor iteration, state in enumerate(states):\n\n S = state[0]\n I = state[1]\n I0, I1, I2, I3, I4, I5, I6, I7 = I\n\n if iteration > 0 and states[iteration - 1][1] == I:\n # rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = (1-I0) * 5, I0*5, (1-I1)*5, I1*5, (1-I2)*5, I2*5, (1-I3)*5, I3*5\n rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b, rho_I4_a, rho_I4_b, rho_I5_a, rho_I5_b, rho_I6_a, rho_I6_b, rho_I7_a, rho_I7_b = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n else:\n rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b, rho_I4_a, rho_I4_b, rho_I5_a, rho_I5_b, rho_I6_a, rho_I6_b, rho_I7_a, rho_I7_b = (1 - I0) * 5, I0 * 5, (1 - I1) * 5, I1 * 5, (1 - I2) * 5, I2 * 5, (1 - I3) * 5, I3 * 5, (1 - I4) * 5, I4 * 5, (1 - I5) * 5, I5 * 5, (1 - I6) * 5, I6 * 5, (1 - 
I7) * 5, I7 * 5\n\n rho_x, rho_y = 0, 0\n params = (delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b, rho_I4_a, rho_I4_b, rho_I5_a, rho_I5_b, rho_I6_a, rho_I6_b, rho_I7_a, rho_I7_b)\n\n if iteration:\n Y0 = Y_last[-1, :]\n\n # Y0[24:26] = S\n Y0[48:51] = S\n\n # initialization\n\n T = np.linspace(0, t_end, N)\n\n t1 = t_end\n dt = t_end / N\n T = np.arange(0, t1 + dt, dt)\n # Y = np.zeros([1 + N, 59])\n Y = np.zeros([1 + N, 128])\n Y[0, :] = Y0\n\n # simulation\n r = ode(CLB_extended_model_ODE).set_integrator('zvode', method='bdf')\n r.set_initial_value(Y0, T[0]).set_f_params(params)\n\n i = 1\n while r.successful() and r.t < t1:\n Y[i, :] = r.integrate(r.t + dt)\n i += 1\n\n # hold the state after half of the simulation time!\n if r.t > t1 / 2:\n params = (\n delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x,\n r_X, r_Y,\n 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0)\n r.set_f_params(params)\n\n Y_last = Y\n if not iteration:\n Y_full = Y\n T_full = T\n else:\n Y_full = np.append(Y_full, Y, axis=0)\n T_full = np.append(T_full, T + iteration * t_end, axis=0)\n\nY = Y_full\nT = T_full\n\nS0, S1, S2 = Y[:, 48], Y[:, 49], Y[:, 50]\n\nI0_a, I0_b = Y[:, 2], Y[:, 3]\nI1_a, I1_b = Y[:, 8], Y[:, 9]\nI2_a, I2_b = Y[:, 14], Y[:, 15]\nI3_a, I3_b = Y[:, 20], Y[:, 21]\nI4_a, I4_b = Y[:, 26], Y[:, 27]\nI5_a, I5_b = Y[:, 32], Y[:, 33]\nI6_a, I6_b = Y[:, 38], Y[:, 39]\nI7_a, I7_b = Y[:, 44], Y[:, 45]\n\nout = Y[:, -1]\n\n# plot\n\nax1 = plt.subplot(311)\nax1.plot(T, I0_a, color=mcolors.TABLEAU_COLORS[\"tab:blue\"], alpha=0.25)\nax1.plot(T, I1_a, color=mcolors.TABLEAU_COLORS[\"tab:orange\"], alpha=0.25)\nax1.plot(T, I2_a, color=mcolors.TABLEAU_COLORS[\"tab:green\"], alpha=0.25)\nax1.plot(T, I3_a, color=mcolors.TABLEAU_COLORS[\"tab:red\"], alpha=0.25)\nax1.plot(T, I4_a, color=mcolors.TABLEAU_COLORS[\"tab:purple\"], alpha=0.25)\nax1.plot(T, I5_a, color=mcolors.TABLEAU_COLORS[\"tab:brown\"], alpha=0.25)\nax1.plot(T, I6_a, color=mcolors.TABLEAU_COLORS[\"tab:pink\"], alpha=0.25)\nax1.plot(T, I7_a, color=mcolors.TABLEAU_COLORS[\"tab:olive\"], alpha=0.25)\n\nax1.set_xlabel(\"Time [min]\")\nax1.set_ylabel(\"Concentrations [nM]\")\n\nax1.legend([\"$I_0$\", \"$I_1$\", \"$I_2$\", \"$I_3$\", \"$I_4$\", \"$I_5$\", \"$I_6$\", \"$I_7$\"])\nax1.set_xlabel(\"Time [min]\")\nax1.set_ylabel(\"Concentrations [nM]\")\n\nax5 = plt.subplot(312)\nax5.plot(T, S0, color=mcolors.TABLEAU_COLORS[\"tab:pink\"], alpha=0.25)\nax5.plot(T, S1, color=mcolors.TABLEAU_COLORS[\"tab:orange\"], alpha=0.25)\nax5.plot(T, S2, color=mcolors.TABLEAU_COLORS[\"tab:blue\"], alpha=0.25)\n\nax5.legend([\"$x_1 = S_0$\", \"$x_2 = S_1$\", \"$x_3 = S_2$\"])\n# ax5.set_title('Select inputs')\nax5.set_xlabel(\"Time [min]\")\nax5.set_ylabel(\"Concentrations [nM]\")\n\nax6 = plt.subplot(313)\nax6.plot(T, out, color=\"#8080805a\", alpha=0.40)\n# ax6.set_title('out')\nax6.legend(['$y$ = out'])\nax6.set_xlabel(\"Time [min]\")\nax6.set_ylabel(\"Concentrations [nM]\")\n\nticks = np.arange(0, 25, 1) * 1000\ntick_labels = list(map(str, ticks))\nfor i in range(len(tick_labels)):\n if i % 2 == 1:\n tick_labels[i] = \"\"\n\nax1.set_xticks(ticks)\nax5.set_xticks(ticks)\nax6.set_xticks(ticks)\n\nax1.set_xticklabels(tick_labels)\nax5.set_xticklabels(tick_labels)\nax6.set_xticklabels(tick_labels)\n\n# plt.suptitle(\"$out = \\\\overline{S}_1 \\\\overline{S}_0 I_0 \\\\vee 
\\\\overline{S}_1 S_0 I_1 \\\\vee S_1 \\\\overline{S}_0 I_2 \\\\vee S_1 S_0 I_3$\")\nplt.gcf().set_size_inches(15, 10)\nplt.savefig(\"figs\\\\NAND_NOR_XOR_ode_extended.pdf\", bbox_inches='tight')\n\n\n\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "scipy.integrate.ode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
angnuoli/Coursework
[ "2197e20de5da2bc820b3c179f871835f8d02f771" ]
[ "Machine Learning/part 2-regression/decision-tree-regression/decision_tree_regression.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: gjxhlan\n\"\"\"\n# Regression template\n# 1. Data Preprocessing\n\n# Import the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:,1:2].values # try to make X as matrix, y as vector\ny = dataset.iloc[:,2].values\n\n# Fitting the Decision Tree Regression to the dataset\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\n# Predicting a new result\ny_pred = regressor.predict(6.5)\n\n# 3. Visualising the dataset results (for higher resolution and smoother curve)\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, c='red')\nplt.plot(X, regressor.predict(X), color = 'blue')\nplt.title('Salary vs Level (Decision Tree Regression)')\nplt.xlabel('Level')\nplt.ylabel('Salary')\nplt.show()\n\nplt.scatter(X, y, c='red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Salary vs Level (Decision Tree Regression)')\nplt.xlabel('Level')\nplt.ylabel('Salary')\nplt.show()" ]
[ [ "pandas.read_csv", "sklearn.tree.DecisionTreeRegressor", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
quantopian-enterprise/blaze
[ "6b686bed87993494b11676ed25e7b30f18ca2248" ]
[ "blaze/compute/tests/test_pytables_compute.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport os\nimport pytest\ntb = pytest.importorskip('tables')\n\nimport numpy as np\n\nfrom blaze.compute.core import compute\nfrom blaze.expr import symbol\nfrom blaze import drop, discover, create_index\nfrom blaze.utils import tmpfile\n\n\nt = symbol('t', 'var * {id: int, name: string, amount: int}')\n\nx = np.array([(1, 'Alice', 100),\n (2, 'Bob', -200),\n (3, 'Charlie', 300),\n (4, 'Denis', 400),\n (5, 'Edith', -500)],\n dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])\n\n\[email protected]_fixture\ndef data():\n with tmpfile('.h5') as filename:\n f = tb.open_file(filename, mode='w')\n d = f.create_table('/', 'title', x)\n yield d\n d.close()\n f.close()\n\n\[email protected]_fixture\ndef csi_data():\n with tmpfile('.h5') as filename:\n f = tb.open_file(filename, mode='w')\n d = f.create_table('/', 'title', x)\n d.cols.amount.create_csindex()\n d.cols.id.create_csindex()\n yield d\n d.close()\n f.close()\n\n\[email protected]_fixture\ndef idx_data():\n with tmpfile('.h5') as fn:\n f = tb.open_file(fn, mode='w')\n d = f.create_table('/', 'title', x)\n d.cols.amount.create_index()\n d.cols.id.create_index()\n yield d\n d.close()\n f.close()\n\n\ndef eq(a, b):\n return (a == b).all()\n\n\ndef test_discover_datashape(data):\n ds = discover(data)\n t = symbol('t', ds)\n columns = t.fields\n assert columns is not None\n\n\ndef test_symbol(data):\n assert compute(t, data) == data\n assert isinstance(data, tb.Table)\n\n\ndef test_single_column(data):\n assert eq(compute(t['name'], data), x['name'])\n\n\ndef test_projection(data):\n assert eq(compute(t[['name', 'amount']], data), x[['name', 'amount']])\n\n\ndef test_eq(data):\n assert eq(compute(t['amount'] == 100, data), x['amount'] == 100)\n\n\ndef test_scalar_ops(data):\n from operator import add, sub, mul, truediv\n\n for op in (add, sub, mul, truediv):\n assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))\n assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))\n assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))\n assert eq(compute(op(10, t.amount), data), op(10, x['amount']))\n\n\ndef test_neg(data):\n assert eq(compute(-t.amount, data), -x['amount'])\n\n\ndef test_failing_floordiv(data):\n from operator import floordiv as op\n\n with pytest.raises(TypeError):\n assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))\n\n with pytest.raises(TypeError):\n assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))\n\n with pytest.raises(TypeError):\n assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))\n\n with pytest.raises(TypeError):\n assert eq(compute(op(10, t.amount), data), op(10, x['amount']))\n\n\ndef test_selection(data):\n assert eq(compute(t[t['amount'] == 100], data), x[x['amount'] == 0])\n assert eq(compute(t[t['amount'] < 0], data), x[x['amount'] < 0])\n\n\ndef test_arithmetic(data):\n assert eq(compute(t['amount'] + t['id'], data), x['amount'] + x['id'])\n assert eq(compute(t['amount'] * t['id'], data), x['amount'] * x['id'])\n assert eq(compute(t['amount'] % t['id'], data), x['amount'] % x['id'])\n assert eq(compute(t['amount'] + t['id'] + 3, data),\n x['amount'] + x['id'] + 3)\n\n\ndef test_reductions(data):\n assert compute(t['amount'].count(), data) == len(x['amount'])\n assert compute(t['amount'].sum(), data) == x['amount'].sum()\n assert compute(t['amount'].mean(), data) == x['amount'].mean()\n assert compute(t.amount[0], data) == x['amount'][0]\n assert 
compute(t.amount[-1], data) == x['amount'][-1]\n\n\nclass TestTopLevelReductions(object):\n def test_count(self, data):\n from blaze import count\n assert compute(count(t['amount']), data) == len(x['amount'])\n\n def test_sum(self, data):\n from blaze import sum\n assert compute(sum(t['amount']), data) == x['amount'].sum()\n\n def test_mean(self, data):\n from blaze import mean\n assert compute(mean(t['amount']), data) == x['amount'].mean()\n\n\nclass TestFailingSort(object):\n \"\"\"These fail because we haven't created a completely sorted index\"\"\"\n\n def test_basic(self, data):\n with pytest.raises(ValueError):\n compute(t.sort('id'), data)\n\n @pytest.mark.xfail(\n raises=TypeError,\n reason='PyTables does not support multiple column sorting'\n )\n def test_multiple_columns(self, data):\n compute(t.sort(['amount', 'id']), data)\n\n @pytest.mark.xfail(\n raises=TypeError,\n reason='PyTables does not support multiple column sorting'\n )\n def test_multiple_columns_sorted_data(self, csi_data):\n compute(t.sort(['amount', 'id']), csi_data)\n\n\nclass TestCSISort(object):\n def test_basic(self, csi_data):\n assert eq(compute(t.sort('amount'), csi_data),\n np.sort(x, order='amount'))\n assert eq(compute(t.sort('id'), csi_data),\n np.sort(x, order='id'))\n\n def test_column_expr(self, csi_data):\n assert eq(compute(t.sort(t.amount), csi_data),\n np.sort(x, order='amount'))\n assert eq(compute(t.sort(t.id), csi_data),\n np.sort(x, order='id'))\n\n def test_non_existent_column(self, csi_data):\n with pytest.raises(ValueError):\n compute(t.sort('not here'), csi_data)\n\n def test_ascending(self, csi_data):\n assert eq(compute(t.sort('amount', ascending=False), csi_data),\n np.sort(x, order='amount')[::-1])\n assert eq(compute(t.sort('amount', ascending=False), csi_data),\n np.sort(x, order='amount')[::-1])\n\n\nclass TestIndexSort(object):\n \"\"\"Fails with a partially sorted index\"\"\"\n\n @pytest.mark.xfail(\n raises=ValueError,\n reason='PyTables cannot sort with a standard index'\n )\n def test_basic(self, idx_data):\n compute(t.sort('amount'), idx_data)\n\n @pytest.mark.xfail(\n raises=ValueError,\n reason='PyTables cannot sort with a standard index'\n )\n def test_ascending(self, idx_data):\n compute(t.sort('amount', ascending=False), idx_data)\n\n\ndef test_head(data):\n assert eq(compute(t.head(2), data), x[:2])\n assert eq(compute(t.amount.head(2), data), x['amount'][:2])\n\n\[email protected]_fixture\ndef pyt():\n tb = pytest.importorskip('tables')\n fn = 'test.pyt.h5'\n f = tb.open_file(fn, mode='w')\n d = f.create_table('/', 'test', x)\n try:\n yield d\n finally:\n d.close()\n f.close()\n try:\n os.remove(fn)\n except OSError:\n pass\n\n\ndef test_drop(pyt):\n drop(pyt)\n with pytest.raises(tb.ClosedNodeError):\n drop(pyt)\n\n\ndef test_create_index(pyt):\n create_index(pyt, 'id')\n assert 'id' in pyt.colindexes\n\n\ndef test_create_multiple_indexes(pyt):\n create_index(pyt, ['id', 'amount'])\n assert len(pyt.colindexes) == 2\n assert 'id' in pyt.colindexes\n assert 'amount' in pyt.colindexes\n\n\ndef test_create_multiple_indexes_fails(pyt):\n with pytest.raises(ValueError):\n create_index(pyt, ['id', 'blarg'])\n\n with pytest.raises(ValueError):\n create_index(pyt, ['foo', 'bar'])\n\n\ndef test_create_index_fails(pyt):\n with pytest.raises(AttributeError):\n create_index(pyt, 'no column here!')\n\n\ndef test_nrows():\n assert compute(t.nrows, x) == len(x)\n\n\ndef test_nelements():\n assert compute(t.nelements(axis=0), x) == len(x)\n assert compute(t.nelements(), x) == 
len(x)\n" ]
[ [ "numpy.array", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
forbin-capital/zipline
[ "6c8ecd83fae6ce4f7bc7c8ad9e43164020770e76" ]
[ "zipline/pipeline/engine.py" ]
[ "\"\"\"\nComputation engines for executing Pipelines.\n\nThis module defines the core computation algorithms for executing Pipelines.\n\nThe primary entrypoint of this file is SimplePipelineEngine.run_pipeline, which\nimplements the following algorithm for executing pipelines:\n\n1. Determine the domain of the pipeline. The domain determines the\n top-level set of dates and assets that serve as row- and\n column-labels for the computations performed by this\n pipeline. This logic lives in\n zipline.pipeline.domain.infer_domain.\n\n2. Build a dependency graph of all terms in `pipeline`, with\n information about how many extra rows each term needs from its\n inputs. At this point we also **specialize** any generic\n LoadableTerms to the domain determined in (1). This logic lives in\n zipline.pipeline.graph.TermGraph and\n zipline.pipeline.graph.ExecutionPlan.\n\n3. Combine the domain computed in (2) with our AssetFinder to produce\n a \"lifetimes matrix\". The lifetimes matrix is a DataFrame of\n booleans whose labels are dates x assets. Each entry corresponds\n to a (date, asset) pair and indicates whether the asset in\n question was tradable on the date in question. This logic\n primarily lives in AssetFinder.lifetimes.\n\n4. Call self._populate_initial_workspace, which produces a\n \"workspace\" dictionary containing cached or otherwise pre-computed\n terms. By default, the initial workspace contains the lifetimes\n matrix and its date labels.\n\n5. Topologically sort the graph constructed in (1) to produce an\n execution order for any terms that were not pre-populated. This\n logic lives in TermGraph.\n\n6. Iterate over the terms in the order computed in (5). For each term:\n\n a. Fetch the term's inputs from the workspace, possibly removing\n unneeded leading rows from the input (see ExecutionPlan.offset\n for details on why we might have extra leading rows).\n\n b. Call ``term._compute`` with the inputs. Store the results into\n the workspace.\n\n c. Decrement \"reference counts\" on the term's inputs, and remove\n their results from the workspace if the refcount hits 0. This\n significantly reduces the maximum amount of memory that we\n consume during execution\n\n This logic lives in SimplePipelineEngine.compute_chunk.\n\n7. Extract the pipeline's outputs from the workspace and convert them\n into \"narrow\" format, with output labels dictated by the Pipeline's\n screen. 
This logic lives in SimplePipelineEngine._to_narrow.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nfrom functools import partial\n\nfrom six import iteritems, with_metaclass, viewkeys\nfrom numpy import array, arange\nfrom pandas import DataFrame, MultiIndex\nfrom toolz import groupby\n\nfrom zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray\nfrom zipline.errors import NoFurtherDataError\nfrom zipline.utils.input_validation import expect_types\nfrom zipline.utils.numpy_utils import (\n as_column,\n repeat_first_axis,\n repeat_last_axis,\n)\nfrom zipline.utils.pandas_utils import explode\n\nfrom .domain import Domain, GENERIC\nfrom .graph import maybe_specialize\nfrom .hooks import DelegatingHooks\nfrom .term import AssetExists, InputDates, LoadableTerm\n\nfrom zipline.utils.date_utils import compute_date_range_chunks\nfrom zipline.utils.pandas_utils import categorical_df_concat\n\n\nclass PipelineEngine(with_metaclass(ABCMeta)):\n\n @abstractmethod\n def run_pipeline(self, pipeline, start_date, end_date, hooks=None):\n \"\"\"\n Compute values for ``pipeline`` from ``start_date`` to ``end_date``.\n\n Parameters\n ----------\n pipeline : zipline.pipeline.Pipeline\n The pipeline to run.\n start_date : pd.Timestamp\n Start date of the computed matrix.\n end_date : pd.Timestamp\n End date of the computed matrix.\n hooks : list[implements(PipelineHooks)], optional\n Hooks for instrumenting Pipeline execution.\n\n Returns\n -------\n result : pd.DataFrame\n A frame of computed results.\n\n The ``result`` columns correspond to the entries of\n `pipeline.columns`, which should be a dictionary mapping strings to\n instances of :class:`zipline.pipeline.Term`.\n\n For each date between ``start_date`` and ``end_date``, ``result``\n will contain a row for each asset that passed `pipeline.screen`.\n A screen of ``None`` indicates that a row should be returned for\n each asset that existed each day.\n \"\"\"\n raise NotImplementedError(\"run_pipeline\")\n\n @abstractmethod\n def run_chunked_pipeline(self,\n pipeline,\n start_date,\n end_date,\n chunksize,\n hooks=None):\n \"\"\"\n Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in\n date chunks of size ``chunksize``.\n\n Chunked execution reduces memory consumption, and may reduce\n computation time depending on the contents of your pipeline.\n\n Parameters\n ----------\n pipeline : Pipeline\n The pipeline to run.\n start_date : pd.Timestamp\n The start date to run the pipeline for.\n end_date : pd.Timestamp\n The end date to run the pipeline for.\n chunksize : int\n The number of days to execute at a time.\n hooks : list[implements(PipelineHooks)], optional\n Hooks for instrumenting Pipeline execution.\n\n Returns\n -------\n result : pd.DataFrame\n A frame of computed results.\n\n The ``result`` columns correspond to the entries of\n `pipeline.columns`, which should be a dictionary mapping strings to\n instances of :class:`zipline.pipeline.Term`.\n\n For each date between ``start_date`` and ``end_date``, ``result``\n will contain a row for each asset that passed `pipeline.screen`.\n A screen of ``None`` indicates that a row should be returned for\n each asset that existed each day.\n\n See Also\n --------\n :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`\n \"\"\"\n raise NotImplementedError(\"run_chunked_pipeline\")\n\n\nclass NoEngineRegistered(Exception):\n \"\"\"\n Raised if a user tries to call pipeline_output in an algorithm that hasn't\n set up a pipeline engine.\n \"\"\"\n\n\nclass 
ExplodingPipelineEngine(PipelineEngine):\n \"\"\"\n A PipelineEngine that doesn't do anything.\n \"\"\"\n def run_pipeline(self, pipeline, start_date, end_date, hooks=None):\n raise NoEngineRegistered(\n \"Attempted to run a pipeline but no pipeline \"\n \"resources were registered.\"\n )\n\n def run_chunked_pipeline(self,\n pipeline,\n start_date,\n end_date,\n chunksize,\n hooks=None):\n raise NoEngineRegistered(\n \"Attempted to run a chunked pipeline but no pipeline \"\n \"resources were registered.\"\n )\n\n\ndef default_populate_initial_workspace(initial_workspace,\n root_mask_term,\n execution_plan,\n dates,\n assets):\n \"\"\"The default implementation for ``populate_initial_workspace``. This\n function returns the ``initial_workspace`` argument without making any\n modifications.\n\n Parameters\n ----------\n initial_workspace : dict[array-like]\n The initial workspace before we have populated it with any cached\n terms.\n root_mask_term : Term\n The root mask term, normally ``AssetExists()``. This is needed to\n compute the dates for individual terms.\n execution_plan : ExecutionPlan\n The execution plan for the pipeline being run.\n dates : pd.DatetimeIndex\n All of the dates being requested in this pipeline run including\n the extra dates for look back windows.\n assets : pd.Int64Index\n All of the assets that exist for the window being computed.\n\n Returns\n -------\n populated_initial_workspace : dict[term, array-like]\n The workspace to begin computations with.\n \"\"\"\n return initial_workspace\n\n\nclass SimplePipelineEngine(PipelineEngine):\n \"\"\"\n PipelineEngine class that computes each term independently.\n\n Parameters\n ----------\n get_loader : callable\n A function that is given a loadable term and returns a PipelineLoader\n to use to retrieve raw data for that term.\n asset_finder : zipline.assets.AssetFinder\n An AssetFinder instance. We depend on the AssetFinder to determine\n which assets are in the top-level universe at any point in time.\n populate_initial_workspace : callable, optional\n A function which will be used to populate the initial workspace when\n computing a pipeline. 
See\n :func:`zipline.pipeline.engine.default_populate_initial_workspace`\n for more info.\n default_hooks : list, optional\n List of hooks that should be used to instrument all pipelines executed\n by this engine.\n\n See Also\n --------\n :func:`zipline.pipeline.engine.default_populate_initial_workspace`\n \"\"\"\n __slots__ = (\n '_get_loader',\n '_finder',\n '_root_mask_term',\n '_root_mask_dates_term',\n '_populate_initial_workspace',\n )\n\n @expect_types(\n default_domain=Domain,\n __funcname='SimplePipelineEngine',\n )\n def __init__(self,\n get_loader,\n asset_finder,\n default_domain=GENERIC,\n populate_initial_workspace=None,\n default_hooks=None):\n\n self._get_loader = get_loader\n self._finder = asset_finder\n\n self._root_mask_term = AssetExists()\n self._root_mask_dates_term = InputDates()\n\n self._populate_initial_workspace = (\n populate_initial_workspace or default_populate_initial_workspace\n )\n self._default_domain = default_domain\n\n if default_hooks is None:\n self._default_hooks = []\n else:\n self._default_hooks = list(default_hooks)\n\n def run_chunked_pipeline(self,\n pipeline,\n start_date,\n end_date,\n chunksize,\n hooks=None):\n \"\"\"\n Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in\n date chunks of size ``chunksize``.\n\n Chunked execution reduces memory consumption, and may reduce\n computation time depending on the contents of your pipeline.\n\n Parameters\n ----------\n pipeline : Pipeline\n The pipeline to run.\n start_date : pd.Timestamp\n The start date to run the pipeline for.\n end_date : pd.Timestamp\n The end date to run the pipeline for.\n chunksize : int\n The number of days to execute at a time.\n hooks : list[implements(PipelineHooks)], optional\n Hooks for instrumenting Pipeline execution.\n\n Returns\n -------\n result : pd.DataFrame\n A frame of computed results.\n\n The ``result`` columns correspond to the entries of\n `pipeline.columns`, which should be a dictionary mapping strings to\n instances of :class:`zipline.pipeline.Term`.\n\n For each date between ``start_date`` and ``end_date``, ``result``\n will contain a row for each asset that passed `pipeline.screen`.\n A screen of ``None`` indicates that a row should be returned for\n each asset that existed each day.\n\n See Also\n --------\n :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`\n \"\"\"\n domain = self.resolve_domain(pipeline)\n ranges = compute_date_range_chunks(\n domain.all_sessions(),\n start_date,\n end_date,\n chunksize,\n )\n hooks = self._resolve_hooks(hooks)\n\n run_pipeline = partial(self._run_pipeline_impl, pipeline, hooks=hooks)\n with hooks.running_pipeline(pipeline, start_date, end_date):\n chunks = [run_pipeline(s, e) for s, e in ranges]\n\n if len(chunks) == 1:\n # OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`\n # if we don't have to.\n return chunks[0]\n\n # Filter out empty chunks. 
Empty dataframes lose dtype information,\n # which makes concatenation fail.\n nonempty_chunks = [c for c in chunks if len(c)]\n return categorical_df_concat(nonempty_chunks, inplace=True)\n\n def run_pipeline(self, pipeline, start_date, end_date, hooks=None):\n \"\"\"\n Compute values for ``pipeline`` from ``start_date`` to ``end_date``.\n\n Parameters\n ----------\n pipeline : zipline.pipeline.Pipeline\n The pipeline to run.\n start_date : pd.Timestamp\n Start date of the computed matrix.\n end_date : pd.Timestamp\n End date of the computed matrix.\n hooks : list[implements(PipelineHooks)], optional\n Hooks for instrumenting Pipeline execution.\n\n Returns\n -------\n result : pd.DataFrame\n A frame of computed results.\n\n The ``result`` columns correspond to the entries of\n `pipeline.columns`, which should be a dictionary mapping strings to\n instances of :class:`zipline.pipeline.Term`.\n\n For each date between ``start_date`` and ``end_date``, ``result``\n will contain a row for each asset that passed `pipeline.screen`.\n A screen of ``None`` indicates that a row should be returned for\n each asset that existed each day.\n \"\"\"\n hooks = self._resolve_hooks(hooks)\n with hooks.running_pipeline(pipeline, start_date, end_date):\n return self._run_pipeline_impl(\n pipeline,\n start_date,\n end_date,\n hooks,\n )\n\n def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks):\n \"\"\"Shared core for ``run_pipeline`` and ``run_chunked_pipeline``.\n \"\"\"\n # See notes at the top of this module for a description of the\n # algorithm implemented here.\n if end_date < start_date:\n raise ValueError(\n \"start_date must be before or equal to end_date \\n\"\n \"start_date=%s, end_date=%s\" % (start_date, end_date)\n )\n\n domain = self.resolve_domain(pipeline)\n\n plan = pipeline.to_execution_plan(\n domain, self._root_mask_term, start_date, end_date,\n )\n extra_rows = plan.extra_rows[self._root_mask_term]\n root_mask = self._compute_root_mask(\n domain, start_date, end_date, extra_rows,\n )\n dates, sids, root_mask_values = explode(root_mask)\n\n workspace = self._populate_initial_workspace(\n {\n self._root_mask_term: root_mask_values,\n self._root_mask_dates_term: as_column(dates.values)\n },\n self._root_mask_term,\n plan,\n dates,\n sids,\n )\n\n refcounts = plan.initial_refcounts(workspace)\n execution_order = plan.execution_order(workspace, refcounts)\n\n with hooks.computing_chunk(execution_order,\n start_date,\n end_date):\n\n results = self.compute_chunk(\n graph=plan,\n dates=dates,\n sids=sids,\n workspace=workspace,\n refcounts=refcounts,\n execution_order=execution_order,\n hooks=hooks,\n )\n\n return self._to_narrow(\n plan.outputs,\n results,\n results.pop(plan.screen_name),\n dates[extra_rows:],\n sids,\n )\n\n def _compute_root_mask(self, domain, start_date, end_date, extra_rows):\n \"\"\"\n Compute a lifetimes matrix from our AssetFinder, then drop columns that\n didn't exist at all during the query dates.\n\n Parameters\n ----------\n domain : zipline.pipeline.domain.Domain\n Domain for which we're computing a pipeline.\n start_date : pd.Timestamp\n Base start date for the matrix.\n end_date : pd.Timestamp\n End date for the matrix.\n extra_rows : int\n Number of extra rows to compute before `start_date`.\n Extra rows are needed by terms like moving averages that require a\n trailing window of data.\n\n Returns\n -------\n lifetimes : pd.DataFrame\n Frame of dtype `bool` containing dates from `extra_rows` days\n before `start_date`, continuing through to 
`end_date`. The\n returned frame contains as columns all assets in our AssetFinder\n that existed for at least one day between `start_date` and\n `end_date`.\n \"\"\"\n sessions = domain.all_sessions()\n\n if start_date not in sessions:\n raise ValueError(\n \"Pipeline start date ({}) is not a trading session for \"\n \"domain {}.\".format(start_date, domain)\n )\n\n elif end_date not in sessions:\n raise ValueError(\n \"Pipeline end date {} is not a trading session for \"\n \"domain {}.\".format(end_date, domain)\n )\n\n start_idx, end_idx = sessions.slice_locs(start_date, end_date)\n if start_idx < extra_rows:\n raise NoFurtherDataError.from_lookback_window(\n initial_message=\"Insufficient data to compute Pipeline:\",\n first_date=sessions[0],\n lookback_start=start_date,\n lookback_length=extra_rows,\n )\n\n # NOTE: This logic should probably be delegated to the domain once we\n # start adding more complex domains.\n #\n # Build lifetimes matrix reaching back to `extra_rows` days before\n # `start_date.`\n finder = self._finder\n lifetimes = finder.lifetimes(\n sessions[start_idx - extra_rows:end_idx],\n include_start_date=False,\n country_codes=(domain.country_code,),\n )\n\n if not lifetimes.columns.unique:\n columns = lifetimes.columns\n duplicated = columns[columns.duplicated()].unique()\n raise AssertionError(\"Duplicated sids: %d\" % duplicated)\n\n # Filter out columns that didn't exist from the farthest look back\n # window through the end of the requested dates.\n existed = lifetimes.any()\n ret = lifetimes.loc[:, existed]\n num_assets = ret.shape[1]\n\n if num_assets == 0:\n raise ValueError(\n \"Failed to find any assets with country_code {!r} that traded \"\n \"between {} and {}.\\n\"\n \"This probably means that your asset db is old or that it has \"\n \"incorrect country/exchange metadata.\".format(\n domain.country_code, start_date, end_date,\n )\n )\n\n return ret\n\n @staticmethod\n def _inputs_for_term(term, workspace, graph, domain, refcounts):\n \"\"\"\n Compute inputs for the given term.\n\n This is mostly complicated by the fact that for each input we store as\n many rows as will be necessary to serve **any** computation requiring\n that input.\n \"\"\"\n offsets = graph.offset\n out = []\n\n # We need to specialize here because we don't change ComputableTerm\n # after resolving domains, so they can still contain generic terms as\n # inputs.\n specialized = [maybe_specialize(t, domain) for t in term.inputs]\n\n if term.windowed:\n # If term is windowed, then all input data should be instances of\n # AdjustedArray.\n for input_ in specialized:\n adjusted_array = ensure_adjusted_array(\n workspace[input_], input_.missing_value,\n )\n out.append(\n adjusted_array.traverse(\n window_length=term.window_length,\n offset=offsets[term, input_],\n # If the refcount for the input is > 1, we will need\n # to traverse this array again so we must copy.\n # If the refcount for the input == 0, this is the last\n # traversal that will happen so we can invalidate\n # the AdjustedArray and mutate the data in place.\n copy=refcounts[input_] > 1,\n )\n )\n else:\n # If term is not windowed, input_data may be an AdjustedArray or\n # np.ndarray. 
Coerce the former to the latter.\n for input_ in specialized:\n input_data = ensure_ndarray(workspace[input_])\n offset = offsets[term, input_]\n # OPTIMIZATION: Don't make a copy by doing input_data[0:] if\n # offset is zero.\n if offset:\n input_data = input_data[offset:]\n out.append(input_data)\n return out\n\n def compute_chunk(self,\n graph,\n dates,\n sids,\n workspace,\n refcounts,\n execution_order,\n hooks):\n \"\"\"\n Compute the Pipeline terms in the graph for the requested start and end\n dates.\n\n This is where we do the actual work of running a pipeline.\n\n Parameters\n ----------\n graph : zipline.pipeline.graph.ExecutionPlan\n Dependency graph of the terms to be executed.\n dates : pd.DatetimeIndex\n Row labels for our root mask.\n sids : pd.Int64Index\n Column labels for our root mask.\n workspace : dict\n Map from term -> output.\n Must contain at least entry for `self._root_mask_term` whose shape\n is `(len(dates), len(assets))`, but may contain additional\n pre-computed terms for testing or optimization purposes.\n refcounts : dict[Term, int]\n Dictionary mapping terms to number of dependent terms. When a\n term's refcount hits 0, it can be safely discarded from\n ``workspace``. See TermGraph.decref_dependencies for more info.\n execution_order : list[Term]\n Order in which to execute terms.\n hooks : implements(PipelineHooks)\n Hooks to instrument pipeline execution.\n\n Returns\n -------\n results : dict\n Dictionary mapping requested results to outputs.\n \"\"\"\n self._validate_compute_chunk_params(graph, dates, sids, workspace)\n\n get_loader = self._get_loader\n\n # Copy the supplied initial workspace so we don't mutate it in place.\n workspace = workspace.copy()\n domain = graph.domain\n\n # Many loaders can fetch data more efficiently if we ask them to\n # retrieve all their inputs at once. For example, a loader backed by a\n # SQL database can fetch multiple columns from the database in a single\n # query.\n #\n # To enable these loaders to fetch their data efficiently, we group\n # together requests for LoadableTerms if they are provided by the same\n # loader and they require the same number of extra rows.\n #\n # The extra rows condition is a simplification: we don't currently have\n # a mechanism for asking a loader to fetch different windows of data\n # for different terms, so we only batch requests together when they're\n # going to produce data for the same set of dates. That may change in\n # the future if we find a loader that can still benefit significantly\n # from batching unequal-length requests.\n def loader_group_key(term):\n loader = get_loader(term)\n extra_rows = graph.extra_rows[term]\n return loader, extra_rows\n\n # Only produce loader groups for the terms we expect to load. This\n # ensures that we can run pipelines for graphs where we don't have a\n # loader registered for an atomic term if all the dependencies of that\n # term were supplied in the initial workspace.\n will_be_loaded = graph.loadable_terms - viewkeys(workspace)\n loader_groups = groupby(\n loader_group_key,\n (t for t in execution_order if t in will_be_loaded),\n )\n\n for term in execution_order:\n # `term` may have been supplied in `initial_workspace`, or we may\n # have loaded `term` as part of a batch with another term coming\n # from the same loader (see note on loader_group_key above). 
In\n # either case, we already have the term computed, so don't\n # recompute.\n if term in workspace:\n continue\n\n # Asset labels are always the same, but date labels vary by how\n # many extra rows are needed.\n mask, mask_dates = graph.mask_and_dates_for_term(\n term,\n self._root_mask_term,\n workspace,\n dates,\n )\n\n if isinstance(term, LoadableTerm):\n loader = get_loader(term)\n to_load = sorted(\n loader_groups[loader_group_key(term)],\n key=lambda t: t.dataset\n )\n with hooks.loading_terms(to_load):\n loaded = loader.load_adjusted_array(\n domain, to_load, mask_dates, sids, mask,\n )\n assert set(loaded) == set(to_load), (\n 'loader did not return an AdjustedArray for each column\\n'\n 'expected: %r\\n'\n 'got: %r' % (sorted(to_load), sorted(loaded))\n )\n workspace.update(loaded)\n else:\n with hooks.computing_term(term):\n workspace[term] = term._compute(\n self._inputs_for_term(\n term,\n workspace,\n graph,\n domain,\n refcounts,\n ),\n mask_dates,\n sids,\n mask,\n )\n if term.ndim == 2:\n assert workspace[term].shape == mask.shape\n else:\n assert workspace[term].shape == (mask.shape[0], 1)\n\n # Decref dependencies of ``term``, and clear any terms\n # whose refcounts hit 0.\n for garbage in graph.decref_dependencies(term, refcounts):\n del workspace[garbage]\n\n # At this point, all the output terms are in the workspace.\n out = {}\n graph_extra_rows = graph.extra_rows\n for name, term in iteritems(graph.outputs):\n # Truncate off extra rows from outputs.\n out[name] = workspace[term][graph_extra_rows[term]:]\n return out\n\n def _to_narrow(self, terms, data, mask, dates, assets):\n \"\"\"\n Convert raw computed pipeline results into a DataFrame for public APIs.\n\n Parameters\n ----------\n terms : dict[str -> Term]\n Dict mapping column names to terms.\n data : dict[str -> ndarray[ndim=2]]\n Dict mapping column names to computed results for those names.\n mask : ndarray[bool, ndim=2]\n Mask array of values to keep.\n dates : ndarray[datetime64, ndim=1]\n Row index for arrays `data` and `mask`\n assets : ndarray[int64, ndim=2]\n Column index for arrays `data` and `mask`\n\n Returns\n -------\n results : pd.DataFrame\n The indices of `results` are as follows:\n\n index : two-tiered MultiIndex of (date, asset).\n Contains an entry for each (date, asset) pair corresponding to\n a `True` value in `mask`.\n columns : Index of str\n One column per entry in `data`.\n\n If mask[date, asset] is True, then result.loc[(date, asset), colname]\n will contain the value of data[colname][date, asset].\n \"\"\"\n if not mask.any():\n # Manually handle the empty DataFrame case. This is a workaround\n # to pandas failing to tz_localize an empty dataframe with a\n # MultiIndex. 
It also saves us the work of applying a known-empty\n # mask to each array.\n #\n # Slicing `dates` here to preserve pandas metadata.\n empty_dates = dates[:0]\n empty_assets = array([], dtype=object)\n return DataFrame(\n data={\n name: array([], dtype=arr.dtype)\n for name, arr in iteritems(data)\n },\n index=MultiIndex.from_arrays([empty_dates, empty_assets]),\n )\n\n final_columns = {}\n for name in data:\n # Each term that computed an output has its postprocess method\n # called on the filtered result.\n #\n # As of Mon May 2 15:38:47 2016, we only use this to convert\n # LabelArrays into categoricals.\n final_columns[name] = terms[name].postprocess(data[name][mask])\n\n resolved_assets = array(self._finder.retrieve_all(assets))\n index = _pipeline_output_index(dates, resolved_assets, mask)\n\n return DataFrame(data=final_columns, index=index)\n\n def _validate_compute_chunk_params(self,\n graph,\n dates,\n sids,\n initial_workspace):\n \"\"\"\n Verify that the values passed to compute_chunk are well-formed.\n \"\"\"\n root = self._root_mask_term\n clsname = type(self).__name__\n\n # Writing this out explicitly so this errors in testing if we change\n # the name without updating this line.\n compute_chunk_name = self.compute_chunk.__name__\n if root not in initial_workspace:\n raise AssertionError(\n \"root_mask values not supplied to {cls}.{method}\".format(\n cls=clsname,\n method=compute_chunk_name,\n )\n )\n\n shape = initial_workspace[root].shape\n implied_shape = len(dates), len(sids)\n if shape != implied_shape:\n raise AssertionError(\n \"root_mask shape is {shape}, but received dates/assets \"\n \"imply that shape should be {implied}\".format(\n shape=shape,\n implied=implied_shape,\n )\n )\n\n for term in initial_workspace:\n if self._is_special_root_term(term):\n continue\n\n if term.domain is GENERIC:\n # XXX: We really shouldn't allow **any** generic terms to be\n # populated in the initial workspace. A generic term, by\n # definition, can't correspond to concrete data until it's\n # paired with a domain, and populate_initial_workspace isn't\n # given the domain of execution, so it can't possibly know what\n # data to use when populating a generic term.\n #\n # In our current implementation, however, we don't have a good\n # way to represent specializations of ComputableTerms that take\n # only generic inputs, so there's no good way for the initial\n # workspace to provide data for such terms except by populating\n # the generic ComputableTerm.\n #\n # The right fix for the above is to implement \"full\n # specialization\", i.e., implementing ``specialize`` uniformly\n # across all terms, not just LoadableTerms. Having full\n # specialization will also remove the need for all of the\n # remaining ``maybe_specialize`` calls floating around in this\n # file.\n #\n # In the meantime, disallowing ComputableTerms in the initial\n # workspace would break almost every test in\n # `test_filter`/`test_factor`/`test_classifier`, and fixing\n # them would require updating all those tests to compute with\n # more specialized terms. 
Once we have full specialization, we\n # can fix all the tests without a large volume of edits by\n # simply specializing their workspaces, so for now I'm leaving\n # this in place as a somewhat sharp edge.\n if isinstance(term, LoadableTerm):\n raise ValueError(\n \"Loadable workspace terms must be specialized to a \"\n \"domain, but got generic term {}\".format(term)\n )\n\n elif term.domain != graph.domain:\n raise ValueError(\n \"Initial workspace term {} has domain {}. \"\n \"Does not match pipeline domain {}\".format(\n term, term.domain, graph.domain,\n )\n )\n\n def resolve_domain(self, pipeline):\n \"\"\"Resolve a concrete domain for ``pipeline``.\n \"\"\"\n domain = pipeline.domain(default=self._default_domain)\n if domain is GENERIC:\n raise ValueError(\n \"Unable to determine domain for Pipeline.\\n\"\n \"Pass domain=<desired domain> to your Pipeline to set a \"\n \"domain.\"\n )\n return domain\n\n def _is_special_root_term(self, term):\n return (\n term is self._root_mask_term\n or term is self._root_mask_dates_term\n )\n\n def _resolve_hooks(self, hooks):\n if hooks is None:\n hooks = []\n return DelegatingHooks(self._default_hooks + hooks)\n\n\ndef _pipeline_output_index(dates, assets, mask):\n \"\"\"\n Create a MultiIndex for a pipeline output.\n\n Parameters\n ----------\n dates : pd.DatetimeIndex\n Row labels for ``mask``.\n assets : pd.Index\n Column labels for ``mask``.\n mask : np.ndarray[bool]\n Mask array indicating date/asset pairs that should be included in\n output index.\n\n Returns\n -------\n index : pd.MultiIndex\n MultiIndex containing (date, asset) pairs corresponding to ``True``\n values in ``mask``.\n \"\"\"\n date_labels = repeat_last_axis(arange(len(dates)), len(assets))[mask]\n asset_labels = repeat_first_axis(arange(len(assets)), len(dates))[mask]\n return MultiIndex(\n levels=[dates, assets],\n labels=[date_labels, asset_labels],\n # TODO: We should probably add names for these.\n names=[None, None],\n verify_integrity=False,\n )\n" ]
[ [ "pandas.MultiIndex.from_arrays", "numpy.array", "pandas.DataFrame", "pandas.MultiIndex" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
docteurZ/BentoML
[ "2ea9848f907a212818fc0c9da434e88cdecab19f" ]
[ "tests/handlers/test_tf_tensor_handler.py" ]
[ "import sys\nimport json\nimport base64\nimport math\nimport numbers\nimport numpy as np\n\ntry:\n from unittest.mock import Mock, MagicMock\nexcept ImportError:\n from mock import Mock, MagicMock\n\n\ndef mock_tensorflow_module():\n class MockTensor:\n def __init__(self, _input):\n self.input = _input\n\n def numpy(self):\n if isinstance(self.input, (list, tuple)):\n return np.array(self.input, dtype=object)\n return self.input\n\n def __eq__(self, dst):\n return self.input == dst.input\n\n class MockConstant(MockTensor):\n pass\n\n sys.modules['tensorflow'] = MagicMock()\n\n import tensorflow as tf\n\n tf.__version__ = \"2.0\"\n tf.Tensor = tf.compat.v2.Tensor = MockTensor\n tf.constant = tf.compat.v2.constant = MockConstant\n\n\nmock_tensorflow_module()\n\n\nSTR_BYTES = b\"hello world\"\nSTR = STR_BYTES.decode(\"utf-8\")\nSTR_B64 = base64.b64encode(STR_BYTES).decode()\n\nBIN_BYTES = b\"\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\"\nBIN_B64 = base64.b64encode(BIN_BYTES).decode()\n\nTEST_CASES = [\n {'instances': [[[1, 2]], [[3, 4]]]},\n {\"instances\": [[1.0, -float('inf'), float('inf')]]},\n {\"instances\": float('nan')},\n {\"instances\": {\"b64\": STR_B64}},\n {\"instances\": [{\"b64\": STR_B64}]},\n {\"instances\": {\"b64\": BIN_B64}},\n {\"instances\": [{\"b64\": BIN_B64}]},\n]\n\nEXPECTED_RESULTS = [\n [[[1, 2]], [[3, 4]]],\n [[1.0, -float('inf'), float('inf')]],\n float('nan'),\n STR,\n [STR],\n {\"b64\": BIN_B64},\n [{\"b64\": BIN_B64}],\n]\n\n\ndef assert_eq_or_both_nan(x, y):\n if isinstance(x, numbers.Number) and isinstance(y, numbers.Number):\n assert math.isnan(x) and math.isnan(y) or math.isclose(x, y)\n else:\n assert x == y\n\n\ndef test_tf_tensor_handle_request():\n '''\n ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2\n '''\n from bentoml.handlers import TensorflowTensorHandler\n\n request = Mock()\n request.headers = {}\n request.content_type = 'application/json'\n\n handler = TensorflowTensorHandler()\n\n for input_data, except_result in zip(TEST_CASES, EXPECTED_RESULTS):\n request.data = json.dumps(input_data).encode('utf-8')\n response = handler.handle_request(request, lambda i: i)\n\n prediction = json.loads(response.get_data())\n assert_eq_or_both_nan(except_result, prediction)\n\n\ndef test_tf_tensor_handle_batch_request():\n '''\n ref: https://www.tensorflow.org/tfx/serving/api_rest#request_format_2\n '''\n from bentoml.handlers import TensorflowTensorHandler\n\n handler = TensorflowTensorHandler()\n request = Mock()\n\n for input_data, except_result in zip(TEST_CASES, EXPECTED_RESULTS):\n request.data = json.dumps(input_data).encode('utf-8')\n responses = handler.handle_batch_request([request] * 3, lambda i: i)\n\n for response in responses:\n prediction = json.loads(response.data)\n assert_eq_or_both_nan(except_result, prediction)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PedroLelis/tensor2tensor
[ "5a867d031bd493eeb7d2776e1118d1594ff0a623" ]
[ "tensor2tensor/data_generators/squad.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for SquaAD (https://rajpurkar.github.io/SQuAD-explorer/).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.data_generators import wiki_lm\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\[email protected]_problem\nclass Squad(text_problems.QuestionAndContext2TextProblem):\n \"\"\"Base class for SquAD question answering problem.\"\"\"\n\n _DEV_SET = \"dev-v1.1.json\"\n _URL = \"https://rajpurkar.github.io/SQuAD-explorer/dataset\"\n _TRAINING_SET = \"train-v1.1.json\"\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 10,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n @property\n def is_generate_per_split(self):\n return True\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n url = self._URL\n file_name = self._DEV_SET\n if dataset_split == problem.DatasetSplit.TRAIN:\n file_name = self._TRAINING_SET\n squad_file = generator_utils.maybe_download(tmp_dir,\n file_name,\n os.path.join(url, file_name))\n with tf.gfile.GFile(squad_file, mode=\"r\") as fp:\n squad = json.load(fp)\n\n version = squad[\"version\"]\n for article in squad[\"data\"]:\n if \"title\" in article:\n title = article[\"title\"].strip()\n else:\n title = \"no title\"\n for paragraph in article[\"paragraphs\"]:\n context = paragraph[\"context\"].strip()\n for qa in paragraph[\"qas\"]:\n question = qa[\"question\"].strip()\n id_ = qa[\"id\"]\n\n answer_starts = [answer[\"answer_start\"] for answer in qa[\"answers\"]]\n answers = [answer[\"text\"].strip() for answer in qa[\"answers\"]]\n\n # Features currently used are \"context\", \"question\", and \"answers\".\n # Others are extracted here for the ease of future expansions.\n example = {\n \"version\": version,\n \"title\": title,\n \"context\": context,\n \"question\": question,\n \"id\": id_,\n \"answer_starts\": answer_starts,\n \"answers\": answers,\n \"num_answers\": len(answers),\n \"is_supervised\": True,\n }\n yield {\n \"inputs\": example[\"question\"],\n # TODO(ddohan, wgaj): Figure out a way of extracting all answers.\n \"targets\": example[\"answers\"][0],\n \"context\": example[\"context\"]\n }\n\n\[email protected]_problem\nclass SquadConcat(Squad):\n \"\"\"Squad with question and context concatenated together in inputs.\"\"\"\n\n def dataset_filename(self):\n return \"squad\"\n\n def preprocess_example(self, example, unused_mode, unused_model_hparams):\n sep = tf.convert_to_tensor([self.QUESTION_SEPARATOR_ID],\n dtype=example[\"inputs\"].dtype)\n example[\"inputs\"] = tf.concat(\n [example[\"inputs\"], sep, 
example[\"context\"]], 0)\n return example\n\n def hparams(self, defaults, unused_model_hparams):\n (super(SquadConcat, self)\n .hparams(defaults, unused_model_hparams))\n p = defaults\n del p.modality[\"context\"]\n del p.vocab_size[\"context\"]\n\n\[email protected]_problem\nclass SquadConcatMulti64k(SquadConcat):\n \"\"\"Squad with question and context concatenated, multi-lingual vocabulary.\"\"\"\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 100,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n def preprocess_example(self, example, unused_mode, unused_model_hparams):\n sep = tf.convert_to_tensor([self.QUESTION_SEPARATOR_ID],\n dtype=example[\"inputs\"].dtype)\n example[\"inputs\"] = tf.concat(\n [example[\"inputs\"], sep, example[\"context\"]], 0)\n example.pop(\"context\")\n return example\n\n def dataset_filename(self):\n return \"squad_multi64k\"\n\n @property\n def vocab_filename(self):\n return wiki_lm.LanguagemodelDeEnFrRoWiki64k().vocab_filename\n\n\[email protected]_problem\nclass SquadConcatSharedVocab(SquadConcatMulti64k):\n \"\"\"Squad with question and context concatenated, multi-lingual vocabulary.\"\"\"\n\n def dataset_filename(self):\n return \"squad\"\n\n @property\n def vocab_filename(self):\n return wiki_lm.LanguagemodelEnWiki32k().vocab_filename\n\n\[email protected]_problem\nclass SquadConcatPositioned(SquadConcat):\n \"\"\"SquadConcat with targets in format of answer position + answer length.\"\"\"\n\n def generate_targets(self, targets, context):\n targets = targets[:-1] # skip last terminal symbol.\n targets_new = []\n i = 0\n while i < len(context) - len(targets):\n if context[i: i + len(targets)] == targets:\n # emit answer's position and length.\n targets_new.append(i)\n targets_new.append(len(targets))\n i += 1\n return targets_new\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n samples = (super(SquadConcatPositioned, self)\n .generate_encoded_samples(data_dir, tmp_dir, dataset_split))\n for sample in samples:\n sample[\"targets\"] = self.generate_targets(sample[\"targets\"],\n sample[\"context\"])\n if sample[\"targets\"]:\n yield sample\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.gfile.GFile", "tensorflow.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
StrayRobots/stray
[ "ea775a3c8ec52f32305fe30417bc3152eb9b532b" ]
[ "src/stray/training/data.py" ]
[ "import torch\nimport numpy as np\nimport cv2\nimport pytorch_lightning as pl\nimport albumentations as A\nfrom stray.scene import Scene\nfrom torch.utils.data import Dataset, ConcatDataset\nfrom stray.training import transform, get_heatmap, I\n\nclass StrayKeypointScene(Dataset):\n def __init__(self, path, config, image_size, out_size, eval=False):\n self.scene_path = path\n self.scene = Scene(path)\n self.image_width = image_size[0]\n self.image_height = image_size[1]\n self.out_width = out_size[0]\n self.out_height = out_size[1]\n self.primitive = config['primitive']\n self.num_instances = config['num_instances']\n\n self.eval = eval\n\n self.color_images = self.scene.get_image_filepaths()\n self.camera = self.scene.camera()\n\n self.map_camera = self.scene.camera().scale((self.out_width, self.out_height))\n\n self.transform = A.Compose([\n A.RandomResizedCrop(width=self.image_width, height=self.image_height, scale=(0.3, 1.0), ratio=(0.5, 1.3333333333333333), always_apply=True, p=1.0),\n ], keypoint_params=A.KeypointParams(format='xy', remove_invisible=False))\n\n self.keypoints_W = self._get_keypoints()\n\n def _get_keypoints(self):\n if self.primitive == \"keypoints\":\n world_points = np.zeros((self.num_instances, 3))\n for keypoint in self.scene.keypoints:\n world_points[keypoint.class_id] = keypoint.position\n elif self.primitive == \"rectangle\":\n world_points = np.zeros((4*self.num_instances, 3))\n for rectangle in self.scene.rectangles:\n keypoints = rectangle.keypoints()\n world_points[rectangle.class_id:rectangle.class_id+4] = keypoints\n else:\n raise ValueError(f\"Incorrect primitive{self.primitive}\")\n\n return world_points\n\n def _get_cv_image(self, idx):\n image = cv2.imread(self.color_images[idx])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n def __len__(self):\n return len(self.scene)\n\n def _sort_keypoints(self, keypoints_I):\n \"\"\"\n Sorts the rectangle keypoints such that the are ordered top left, top right,\nfrom torch.nn.modules.loss import _Loss\nimport torch.nn.functional as F\n bottom right, bottom left, as rectangles can be oriented arbitrarily.\n Might have to make this configurable and visible in Studio, as this might vary by application.\n Bounding boxes are assumed to have a specific orientation.\n \"\"\"\n\n for rectangle in self.scene.rectangles:\n index = 4 * rectangle.class_id\n keypoints = keypoints_I[index:index+4]\n y_order = np.argsort(keypoints[:, 1])\n top_points = np.stack([keypoints[y_order[0]], keypoints[y_order[1]]])\n bottom_points = np.stack([keypoints[y_order[2]], keypoints[y_order[3]]])\n x_order_top = np.argsort(top_points[:, 0])\n x_order_bottom = np.argsort(bottom_points[:, 0])\n\n top_left = top_points[x_order_top[0]]\n top_right = top_points[x_order_top[1]]\n bottom_right = bottom_points[x_order_bottom[1]]\n bottom_left = bottom_points[x_order_bottom[0]]\n keypoints_I[index:index+4] = np.stack([\n top_left,\n top_right,\n bottom_right,\n bottom_left])\n return keypoints_I\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()[0]\n\n heatmaps = []\n\n T_CW = np.linalg.inv(self.scene.poses[idx])\n keypoint_positions_C = transform(T_CW, self.keypoints_W)\n keypoint_positions_I = self.camera.project(keypoint_positions_C)\n if self.primitive == \"rectangle\":\n keypoint_positions_I = self._sort_keypoints(keypoint_positions_I)\n\n cv_image = self._get_cv_image(idx)\n if not self.eval:\n transformed = self.transform(image=cv_image, keypoints=keypoint_positions_I)\n cv_image = 
transformed[\"image\"]\n keypoint_positions_I = transformed[\"keypoints\"]\n else:\n cv_image = cv2.resize(cv_image, (self.image_width, self.image_height))\n\n cv_image = cv_image.astype(np.float32)\n np_image = np.transpose(cv_image/255.0, [2, 0, 1])\n\n for point_2d_I, point_3d in zip(keypoint_positions_I, self.keypoints_W):\n width_scale = self.out_width / self.image_width\n height_scale = self.out_height / self.image_height\n point_2d_I = [point_2d_I[0]*width_scale, point_2d_I[1]*height_scale]\n\n #NOTE: eval scenes that are not in real-world scale may need to use some constant for the distance\n point_3d_C = transform(T_CW, point_3d[None])[0]\n #TODO: possibly larger value\n diagonal_fraction = 0.3\n top_point = self.map_camera.project((point_3d_C - I[1] * diagonal_fraction)[None])[0]\n bottom_point = self.map_camera.project((point_3d_C + I[1] * diagonal_fraction)[None])[0]\n size = np.linalg.norm(top_point - bottom_point)\n lengthscale = np.sqrt(size**2/20.0)\n\n if self.eval:\n lengthscale = 5\n heatmap = get_heatmap(point_2d_I, self.out_width, self.out_height, lengthscale)\n heatmaps.append(heatmap)\n\n heatmaps = np.array(heatmaps)\n return torch.from_numpy(np_image).float(), torch.from_numpy(heatmaps).float()\n\nclass StrayKeypointDetectionDataset(ConcatDataset):\n def __init__(self, scene_paths, *args, **kwargs):\n scenes = []\n for scene_path in scene_paths:\n scenes.append(StrayKeypointScene(scene_path, *args, **kwargs))\n super().__init__(scenes)\n\n\nclass KeypointSceneData(pl.LightningDataModule):\n def __init__(self, train_dirs, eval_dirs, train_batch_size, eval_batch_size,\n num_workers, config, image_size, out_size):\n super().__init__()\n self.train_dirs = train_dirs\n self.eval_dirs = eval_dirs\n self.train_batch_size = train_batch_size\n self.eval_batch_size = eval_batch_size\n self.num_workers = num_workers\n self.image_size = image_size\n self.out_size = out_size\n self.config = config\n\n def train_dataloader(self):\n dataset = StrayKeypointDetectionDataset(self.train_dirs, self.config, self.image_size, self.out_size)\n\n return torch.utils.data.DataLoader(dataset,\n num_workers=self.num_workers,\n batch_size=self.train_batch_size,\n persistent_workers=True if self.num_workers > 0 else False,\n pin_memory=torch.cuda.is_available())\n\n def val_dataloader(self):\n dataset = StrayKeypointDetectionDataset(self.eval_dirs, self.config, self.image_size, self.out_size, eval=True)\n\n return torch.utils.data.DataLoader(dataset,\n num_workers=self.num_workers,\n batch_size=self.eval_batch_size,\n persistent_workers=True if self.num_workers > 0 else False,\n pin_memory=torch.cuda.is_available())\n" ]
[ [ "numpy.sqrt", "numpy.linalg.inv", "torch.is_tensor", "numpy.stack", "numpy.linalg.norm", "torch.from_numpy", "torch.cuda.is_available", "numpy.transpose", "numpy.argsort", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dragon00417/Flask-Smart-API
[ "828a2198ae58ed1cb8902b8f333218bcd466d2c6" ]
[ "neuralpy/loss_functions/bce_loss.py" ]
[ "\"\"\"BCE Loss\"\"\"\n\nimport numpy as np\nimport torch\nfrom torch.nn import BCEWithLogitsLoss as _BCEWithLogitsLoss\n\n# pylint: disable=too-few-public-methods\n\n\nclass BCELoss:\n \"\"\"\n Applies a BCE Loss function to the model.\n\n BCE Loss automatically applies a Sigmoid Layer\n at the end of the model, so there is no need to add\n a Sigmoid layer.\n\n Supported Arguments\n weight=None : (Numpy Array | List) Manual rescaling of classes\n reduction='mean' : (String) Specifies the reduction\n that is to be applied to the output.\n post_weight=None : (Numpy Array | List) A weight of positive examples\n \"\"\"\n\n def __init__(self, weight=None, reduction='mean', pos_weight=None):\n \"\"\"\n __init__ method for BCELoss\n\n Supported Arguments\n weight=None : (Numpy Array | List) Manual rescaling of classes\n reduction='mean' : (String) Specifies the reduction\n that is to be applied to the output.\n post_weight=None : (Numpy Array | List) A weight of positive examples\n \"\"\"\n if weight is not None and not (\n isinstance(weight, list) or\n type(weight).__module__ == np.__name__):\n raise ValueError(\"Invalid weight\")\n\n if reduction not in [\"none\", \"mean\", \"sum\"]:\n raise ValueError(\"Invalid reduction\")\n\n if pos_weight is not None and not (\n isinstance(pos_weight, list) or\n type(pos_weight).__module__ == np.__name__):\n raise ValueError(\"Invalid pos_weight\")\n\n self.__weight = weight\n self.__reduction = reduction\n self.__pos_weight = pos_weight\n\n def get_loss_function(self):\n \"\"\"\n Returns the details of the loss function\n\n There is no need to call this method as this is used by the\n Sequential model to build the model\n \"\"\"\n # If weight provided, then converting it into torch tensor\n # pylint: disable=not-callable\n weight = None\n\n if self.__weight is not None:\n weight = torch.tensor(self.__weight).float()\n\n # pos_weight provided, then converting in into torch tensor\n pos_weight = None\n\n if self.__pos_weight is not None:\n pos_weight = torch.tensor(self.__pos_weight).float()\n\n return {\n 'loss_function': _BCEWithLogitsLoss,\n 'keyword_arguments': {\n 'weight': weight,\n 'reduction': self.__reduction,\n 'pos_weight': pos_weight\n }\n }\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raywan-110/quantization-DNNs
[ "7295f60d858c0e7daa7a6f14fe491b489eaa6f42" ]
[ "PTQ.py" ]
[ "import torch\nimport time\nfrom Net import MNIST_Net, CIFAR10_Net, MNIST_NetBN, CIFAR10_NetBN\nfrom utils import load_data_fashion_mnist, load_data_cifar10\n\ndef direct_quantize(model, test_loader):\n for i, (data, target) in enumerate(test_loader, 1):\n _ = model.quantize_forward(data)\n if i % 500 == 0:\n break\n print('direct quantization finish')\n\ndef quantize_inference(model, test_loader):\n correct = 0\n for _, (data, target) in enumerate(test_loader, 1):\n output = model.quantize_inference(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n print('\\nTest set: Quant Model Accuracy: {:.0f}%\\n'.format(100. * correct / len(test_loader.dataset)))\n\ndef full_inference(model, test_loader):\n correct = 0\n for _, (data, target) in enumerate(test_loader, 1):\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n print('\\nTest set: Full Model Accuracy: {:.0f}%\\n'.format(100. * correct / len(test_loader.dataset)))\n\nif __name__ == '__main__':\n dataset = 'cifar10'\n test_batch_size = 64\n use_bn = True\n bit = 8\n print('bits: ',bit)\n if dataset == 'cifar10':\n if use_bn:\n model = CIFAR10_NetBN(num_channels=3)\n model.load_state_dict(torch.load('./ckpt/cifar10_cnnBN.pt'))\n else:\n model = CIFAR10_Net(num_channels=3)\n model.load_state_dict(torch.load('./ckpt/cifar10_cnn.pt'))\n train_iter, test_iter = load_data_cifar10(batch_size=test_batch_size)\n else:\n if use_bn:\n model = MNIST_NetBN(num_channels=1)\n model.load_state_dict(torch.load('./ckpt/mnist_cnnBN.pt'))\n else:\n model = MNIST_Net(num_channels=1)\n model.load_state_dict(torch.load('./ckpt/mnist_cnn.pt'))\n train_iter, test_iter = load_data_fashion_mnist(batch_size=test_batch_size)\n\n # full precision inference\n begin = time.time()\n model.eval()\n with torch.no_grad():\n full_inference(model,test_iter)\n end = time.time()\n print('full inference runtime: ', end - begin)\n\n # quantize\n model.quantize(num_bits=bit)\n direct_quantize(model,train_iter) # statistics the value of rmin rmax and updates scale zero_point\n model.freeze()\n\n # quantize inference\n model.eval()\n begin = time.time()\n with torch.no_grad():\n quantize_inference(model,test_iter)\n end = time.time()\n print('runtime: ',end - begin)\n" ]
[ [ "torch.no_grad", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ai-msceteers/assignment2
[ "c2d8e3883be732f21d6ee0d9590f7e455ef1f51f" ]
[ "genetic_solver/rubiks_cube.py" ]
[ "import numpy as np\nimport random\nfrom copy import deepcopy\nfrom collections import Counter\n\n \n\nclass RubiksCube(object):\n \n # Faces stores in 6 NxN faces stored as a numpy matrix in cube \n # 0 : front\n # 1 : top\n # 2 : back\n # 3 : bottom\n # 4 : left\n # 5 : right\n \n \n \n def __init__(self, n=3, randomize=True):\n self.n = n\n self.worst_cost = 48\n self.random_moves = []\n self.cost = 0\n self.cube = np.stack([np.ones((n,n))*i for i in range(6)]) \n if(randomize):\n self.randomize() \n\n def __str__(self):\n return str(self.face())\n \n def __repr__(self):\n return str(self.face())\n \n def face(self):\n return self.cube[0]\n \n def is_solved(self):\n return self.cost == 0\n \n \n # TODO: Look for a better cost function\n def calulate_cost_v1(self):\n total_cost = 0\n for i in range(6):\n _, most_common_count = Counter(self.cube[i].ravel()).most_common()[0]\n current_face_cost = self.n**2 - most_common_count\n total_cost += current_face_cost\n \n self.cost = total_cost\n return self.cost\n \n def calculate_cost(self):\n #print()\n #print()\n self.worst_cost = ((self.n*self.n*(self.n-1)) + 1)*6\n\n get_n_colors_func = lambda x: len(set(x)) - 1 \n binary_cost_func = lambda x: int(len(set(x))!= 1)\n cost_func = get_n_colors_func\n #cost_func = binary_cost_func\n \n def face_cost_func(face):\n #print(face)\n rows_cost = np.apply_along_axis(cost_func, 1, face).sum()\n cols_cost = np.apply_along_axis(cost_func, 0, face).sum()\n is_solved = (rows_cost + cols_cost) == 0\n current_face_cost = rows_cost + cols_cost + (not is_solved)\n #print(rows_cost, cols_cost, is_solved, current_face_cost)\n return current_face_cost\n \n total_cost = np.sum([face_cost_func(self.cube[i]) for i in range(6)])\n #print(total_cost)\n \n #for i in range(6):\n # current_face_cost = face_cost_func(self.cube[i])\n # total_cost += current_face_cost\n \n self.cost = total_cost\n return self.cost\n\n \n def get_random_move(self):\n row = random.randint(0,self.n-1)\n direction = random.choice([\"u\", \"d\", \"l\", \"r\"])\n move = (row, direction)\n return move\n\n \n def randomize(self, n_moves_range = (30, 40)):\n self.random_moves = []\n for i in range(random.randint(*n_moves_range)):\n move = self.get_random_move()\n self.random_moves.append(move)\n self.apply_move(move)\n \n \n def backtrack(self, moves):\n reverse_move_dict = {\"u\":\"d\", \"d\":\"u\", \"l\":\"r\", \"r\":\"l\"}\n for move in moves[::-1]:\n reverse_move = (move[0], reverse_move_dict[move[1]])\n self.apply_move(reverse_move) \n return self.face()\n \n \n def apply_move(self, move):\n if(move==None):\n return self.face()\n \n row, direction = move\n \n if(direction==\"u\"):\n order = [0, 1, 2, 3, 0]\n elif(direction==\"d\"):\n order = [0, 3, 2, 1, 0]\n elif(direction==\"l\"):\n order = [0, 4, 2, 5, 0]\n elif(direction==\"r\"):\n order = [0, 5, 2, 4, 0]\n \n temp_cube = deepcopy(self.cube)\n for i in range(len(order)-1):\n i1, i2 = order[i], order[i+1]\n if(direction in [\"u\", \"d\"]):\n self.cube[i2,:, row] = temp_cube[i1, :, row]\n elif(direction in [\"l\", \"r\"]):\n self.cube[i2, row, :] = temp_cube[i1, row, :] \n self.cost = self.calculate_cost()\n \n return self.face()\n \n \n def apply_moves(self, moves):\n for move in moves:\n self.apply_move(move) \n \n \ncube = RubiksCube(3, randomize=True)\norginal = deepcopy(cube)\nrandom_moves = [cube.get_random_move() for i in range(random.randint(0, 6))]\ncube = RubiksCube(3, randomize=True)\norginal = 
deepcopy(cube)\ncube.apply_moves(random_moves)\ncube.backtrack(random_moves)\n#print(cube.cube)\n#cube.apply_move(cube.get_random_move())\n#cube.apply_move(cube.get_random_move())\n#print(cube.calculate_cost())\n" ]
[ [ "numpy.apply_along_axis", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emmamcbryde/AuTuMN-1
[ "b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f" ]
[ "autumn/models/tuberculosis/outputs.py" ]
[ "import numpy as np\n\nfrom typing import List\nfrom summer import CompartmentalModel\n\nfrom autumn.model_features.curve import tanh_based_scaleup\n\nfrom .constants import COMPARTMENTS, Compartment, INFECTIOUS_COMPS\n\n\ndef request_outputs(\n model: CompartmentalModel,\n cumulative_start_time: float,\n location_strata: List[str],\n time_variant_tb_screening_rate,\n implement_acf: bool,\n implement_ltbi_screening=False,\n pt_efficacy=1.,\n pt_sae_prop=0.\n):\n out = OutputBuilder(model, location_strata)\n\n # Population\n out.request_compartment_output(\"population_size\", COMPARTMENTS)\n\n # Percentage latent\n latent_comps = [Compartment.LATE_LATENT, Compartment.EARLY_LATENT]\n out.request_compartment_output(\"latent_population_size\", latent_comps, save_results=False)\n sources = [\"latent_population_size\", \"population_size\"]\n out.request_output_func(\"percentage_latent\", calculate_percentage, sources)\n\n # Deaths\n out.request_flow_output(\"mortality_infectious_raw\", \"infect_death\", save_results=False)\n out.request_flow_output(\"mortality_on_treatment_raw\", \"treatment_death\", save_results=False)\n sources = [\"mortality_infectious_raw\", \"mortality_on_treatment_raw\"]\n out.request_aggregation_output(\"mortality_raw\", sources, save_results=False)\n model.request_cumulative_output(\n \"cumulative_deaths\",\n \"mortality_raw\",\n start_time=cumulative_start_time,\n )\n\n # Normalise mortality so that it is per unit time (year), not per timestep\n out.request_normalise_flow_output(\"mortality_infectious\", \"mortality_infectious_raw\")\n out.request_normalise_flow_output(\"mortality_on_treatment\", \"mortality_on_treatment_raw\")\n out.request_normalise_flow_output(\"mortality_norm\", \"mortality_raw\", save_results=False)\n sources = [\"mortality_norm\", \"population_size\"]\n out.request_output_func(\"mortality\", calculate_per_hundred_thousand, sources)\n\n # Disease incidence\n out.request_flow_output(\"incidence_early_raw\", \"early_activation\", save_results=False)\n out.request_flow_output(\"incidence_late_raw\", \"late_activation\", save_results=False)\n sources = [\"incidence_early_raw\", \"incidence_late_raw\"]\n out.request_aggregation_output(\"incidence_raw\", sources, save_results=False)\n sources = [\"incidence_raw\", \"population_size\"]\n model.request_cumulative_output(\n \"cumulative_diseased\",\n \"incidence_raw\",\n start_time=cumulative_start_time,\n )\n\n # Normalise incidence so that it is per unit time (year), not per timestep\n out.request_normalise_flow_output(\"incidence_early\", \"incidence_early_raw\")\n out.request_normalise_flow_output(\"incidence_late\", \"incidence_late_raw\")\n out.request_normalise_flow_output(\"incidence_norm\", \"incidence_raw\", save_results=False)\n sources = [\"incidence_norm\", \"population_size\"]\n out.request_output_func(\"incidence\", calculate_per_hundred_thousand, sources)\n\n # Prevalence infectious\n out.request_compartment_output(\n \"infectious_population_size\", INFECTIOUS_COMPS, save_results=False\n )\n sources = [\"infectious_population_size\", \"population_size\"]\n out.request_output_func(\"prevalence_infectious\", calculate_per_hundred_thousand, sources)\n\n # Notifications (normalized to per year)\n out.request_flow_output(\"passive_notifications_raw\", \"detection\", save_results=False)\n if implement_acf:\n out.request_flow_output(\"active_notifications_raw\", \"acf_detection\", save_results=False)\n else:\n null_func = lambda: np.zeros_like(model.times)\n 
out.request_output_func(\"active_notifications_raw\", null_func, [], save_results=False)\n\n sources = [\"passive_notifications_raw\", \"active_notifications_raw\"]\n out.request_aggregation_output(\"notifications_raw\", sources, save_results=False)\n out.request_normalise_flow_output(\"notifications\", \"notifications_raw\")\n\n # Screening rate\n screening_rate_func = tanh_based_scaleup(\n time_variant_tb_screening_rate[\"shape\"],\n time_variant_tb_screening_rate[\"inflection_time\"],\n time_variant_tb_screening_rate[\"start_asymptote\"],\n time_variant_tb_screening_rate[\"end_asymptote\"],\n )\n\n def get_screening_rate():\n return screening_rate_func(model.times)\n\n model.request_function_output(\"screening_rate\", get_screening_rate, [])\n\n # Track cumulative number of preventive treatments provided from 2016\n if implement_ltbi_screening:\n model.request_output_for_flow(\"pt_early_raw\", \"preventive_treatment_early\", save_results=False)\n model.request_output_for_flow(\"pt_late_raw\", \"preventive_treatment_late\", save_results=False)\n model.request_aggregate_output(\"pt_raw\", [\"pt_early_raw\", \"pt_late_raw\"], save_results=False)\n\n # so far, the pt flows only include succesfully treated individuals, we need tp adjust for efficacy\n model.request_function_output(\n name=\"pt\",\n func=lambda x: x / pt_efficacy,\n sources=[\"pt_raw\"],\n save_results=False,\n )\n model.request_cumulative_output(\"cumulative_pt\", \"pt\", start_time=2016., save_results=True)\n model.request_function_output(\n name=\"cumulative_pt_sae\",\n func=lambda x: x * pt_sae_prop,\n sources=[\"cumulative_pt\"],\n save_results=True,\n )\n else: # just record zeroes if PT not implemented\n for zero_output in [\"cumulative_pt\", \"cumulative_pt_sae\"]:\n model.request_function_output(\n name=zero_output,\n func=lambda x: x * 0., # uses x * 0 so we copy the size of the source output x\n sources=[\"incidence\"], # could be any source output\n save_results=True,\n )\n\n\nclass OutputBuilder:\n \"\"\"Helps build derived outputs for the TB model\"\"\"\n\n def __init__(self, model, location_strata) -> None:\n self.model = model\n self.locs = location_strata\n\n def _normalise_timestep(self, vals):\n \"\"\"Normalise flow outputs to be 'per unit time (year)'\"\"\"\n return vals / self.model.timestep\n\n def request_normalise_flow_output(self, output_name, source, save_results=True, stratify_by_loc=True):\n self.request_output_func(\n output_name, self._normalise_timestep, [source], save_results=save_results, stratify_by_loc=stratify_by_loc\n )\n\n def request_flow_output(self, output_name, flow_name, save_results=True):\n self.model.request_output_for_flow(output_name, flow_name, save_results=save_results)\n for location_stratum in self.locs:\n loc_output_name = f\"{output_name}Xlocation_{location_stratum}\"\n self.model.request_output_for_flow(\n loc_output_name,\n flow_name,\n source_strata={\"location\": location_stratum},\n save_results=save_results,\n )\n\n def request_aggregation_output(self, output_name, sources, save_results=True):\n self.model.request_aggregate_output(output_name, sources, save_results=save_results)\n for location_stratum in self.locs:\n # For location-specific mortality calculations\n loc_output_name = f\"{output_name}Xlocation_{location_stratum}\"\n loc_sources = [f\"{s}Xlocation_{location_stratum}\" for s in sources]\n self.model.request_aggregate_output(\n loc_output_name, loc_sources, save_results=save_results\n )\n\n def request_output_func(self, output_name, func, sources, 
save_results=True, stratify_by_loc=True):\n self.model.request_function_output(output_name, func, sources, save_results=save_results)\n if stratify_by_loc:\n for location_stratum in self.locs:\n loc_output_name = f\"{output_name}Xlocation_{location_stratum}\"\n loc_sources = [f\"{s}Xlocation_{location_stratum}\" for s in sources]\n self.model.request_function_output(\n loc_output_name, func, loc_sources, save_results=save_results\n )\n\n def request_compartment_output(self, output_name, compartments, save_results=True):\n self.model.request_output_for_compartments(\n output_name, compartments, save_results=save_results\n )\n for location_stratum in self.locs:\n # For location-specific mortality calculations\n loc_output_name = f\"{output_name}Xlocation_{location_stratum}\"\n self.model.request_output_for_compartments(\n loc_output_name,\n compartments,\n strata={\"location\": location_stratum},\n save_results=save_results,\n )\n\n\ndef calculate_per_hundred_thousand(sub_pop_size, total_pop_size):\n return 1e5 * sub_pop_size / total_pop_size\n\n\ndef calculate_percentage(sub_pop_size, total_pop_size):\n return 100 * sub_pop_size / total_pop_size\n" ]
[ [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taechanha/JORLDY
[ "7356f7481dbc569bf745353105088d65665a4a51" ]
[ "jorldy/core/agent/mpo.py" ]
[ "from collections import deque\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\n\nfrom .base import BaseAgent\nfrom core.network import Network\nfrom core.optimizer import Optimizer\nfrom core.buffer import ReplayBuffer\n\n\nclass MPO(BaseAgent):\n \"\"\"Maximum A Posteriori Policy Optimization (MPO) agent.\n\n Args:\n state_size (int): dimension of state.\n action_size (int): dimension of action.\n hidden_size (int): dimension of hidden unit.\n optim_config (dict): dictionary of the optimizer info.\n (key: 'name', value: name of optimizer)\n actor (str): key of actor network class in _network_dict.txt.\n critic (str): key of critic network class in _network_dict.txt.\n head (str): key of head in _head_dict.txt.\n buffer_size (int): the size of the memory buffer.\n batch_size (int): the number of samples in the one batch.\n start_train_step (int): steps to start learning.\n n_epoch (int): Number of epoch when optimizing the surrogate.\n n_step (int): The number of steps to run for each environment per update.\n clip_grad_norm (float): gradient clipping threshold.\n gamma (float): discount factor.\n device (str): device to use.\n (e.g. 'cpu' or 'gpu'. None can also be used, and in this case, the cpu is used.)\n num_workers: the number of agents in distributed learning.\n critic_loss_type (str): type of critic loss. One of ['1step_TD', 'retrace'].\n num_sample (int): the number of samples.\n min_eta (float): minimum value of eta.\n min_alpha_mu (float): minimum value of alpha_mu.\n min_alpha_sigma (float): minimum value of alpha_sigma.\n eps_eta (float): threshold of temperature loss term.\n eps_alpha_mu (float): threshold of mean part of Gaussian-KL constraint term.\n eps_alpha_sigma (float): threshold of variance part of Gaussian-KL constraint term.\n eta (float): Lagrange multipliers of temperature loss term.\n alpha_mu (float): Lagrange multipliers of mean part of Gaussian-KL constraint term (trust-region loss).\n alpha_sigma (float): Lagrange multipliers of variance part of Gaussian-KL constraint term.\n \"\"\"\n\n def __init__(\n self,\n state_size,\n action_size,\n hidden_size=512,\n optim_config={\"name\": \"adam\"},\n actor=\"discrete_policy\",\n critic=\"dqn\",\n head=\"mlp\",\n buffer_size=50000,\n batch_size=64,\n start_train_step=2000,\n n_epoch=64,\n n_step=8,\n clip_grad_norm=1.0,\n gamma=0.99,\n device=None,\n num_workers=1,\n # parameters unique to MPO\n critic_loss_type=\"retrace\", # one of ['1step_TD', 'retrace']\n num_sample=30,\n min_eta=1e-8,\n min_alpha_mu=1e-8,\n min_alpha_sigma=1e-8,\n eps_eta=0.01,\n eps_alpha_mu=0.01,\n eps_alpha_sigma=5 * 1e-5,\n eta=1.0,\n alpha_mu=1.0,\n alpha_sigma=1.0,\n **kwargs,\n ):\n self.device = (\n torch.device(device)\n if device\n else torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n )\n self.head = head\n self.action_type = actor.split(\"_\")[0]\n assert self.action_type in [\"continuous\", \"discrete\"]\n self.action_size = action_size\n\n self.actor = Network(\n actor, state_size, action_size, D_hidden=hidden_size, head=head\n ).to(self.device)\n self.target_actor = Network(\n actor, state_size, action_size, D_hidden=hidden_size, head=head\n ).to(self.device)\n self.target_actor.load_state_dict(self.actor.state_dict())\n\n assert critic_loss_type in [\"1step_TD\", \"retrace\"]\n self.critic_loss_type = critic_loss_type\n self.critic = Network(\n critic, state_size, action_size, D_hidden=hidden_size, head=head\n ).to(self.device)\n 
self.target_critic = Network(\n critic, state_size, action_size, D_hidden=hidden_size, head=head\n ).to(self.device)\n self.target_critic.load_state_dict(self.critic.state_dict())\n\n self.batch_size = batch_size\n self.n_step = n_step if critic_loss_type == \"retrace\" else 1\n self.clip_grad_norm = clip_grad_norm\n\n self.num_learn = 0\n self.time_t = 0\n self.start_train_step = start_train_step\n self.n_epoch = n_epoch\n\n self.num_sample = num_sample\n\n self.min_eta = torch.tensor(min_eta, device=self.device)\n self.min_alpha_mu = torch.tensor(min_alpha_mu, device=self.device)\n self.min_alpha_sigma = torch.tensor(min_alpha_sigma, device=self.device)\n\n self.eps_eta = eps_eta\n self.eps_alpha_mu = eps_alpha_mu\n self.eps_alpha_sigma = eps_alpha_sigma\n\n self.eta = torch.nn.Parameter(\n torch.tensor(eta, requires_grad=True).to(self.device)\n )\n self.alpha_mu = torch.nn.Parameter(\n torch.tensor(alpha_mu, requires_grad=True).to(self.device)\n )\n self.alpha_sigma = torch.nn.Parameter(\n torch.tensor(alpha_sigma, requires_grad=True).to(self.device)\n )\n\n self.reset_lgr_muls()\n\n self.actor_optimizer = Optimizer(\n **optim_config,\n params=list(self.actor.parameters())\n + [self.eta, self.alpha_mu, self.alpha_sigma],\n )\n self.critic_optimizer = Optimizer(\n **optim_config, params=list(self.critic.parameters())\n )\n\n self.gamma = gamma\n self.tmp_buffer = deque(maxlen=n_step)\n self.memory = ReplayBuffer(buffer_size)\n\n @torch.no_grad()\n def act(self, state, training=True):\n self.actor.train(training)\n if self.action_type == \"continuous\":\n mu, std = self.actor(self.as_tensor(state))\n m = Normal(mu, std)\n z = m.sample() if training else mu\n action = torch.tanh(z)\n action = action.data.cpu().numpy()\n prob = m.log_prob(z).sum(axis=-1, keepdims=True)\n prob = prob.exp().cpu().numpy()\n\n else:\n pi = self.actor(self.as_tensor(state))\n action = (\n torch.multinomial(pi, 1)\n if training\n else torch.argmax(pi, dim=-1, keepdim=True)\n )\n action = action.cpu().numpy()\n prob = np.take(pi.cpu().numpy(), action)\n return {\n \"action\": action,\n \"prob\": prob,\n }\n\n def learn(self):\n transitions = self.memory.sample(self.batch_size)\n for key in transitions.keys():\n # reshape: (batch_size, len_tr, item_dim)\n # -> (batch_size * len_tr, item_dim)\n transitions[key] = self.as_tensor(transitions[key]).view(\n -1, *transitions[key].shape[2:]\n )\n\n state = transitions[\"state\"]\n action = transitions[\"action\"]\n reward = transitions[\"reward\"]\n next_state = transitions[\"next_state\"]\n done = transitions[\"done\"]\n prob_b = transitions[\"prob\"]\n\n if self.action_type == \"continuous\":\n mu, std = self.actor(state)\n Q = self.critic(state, action)\n m = Normal(mu, std)\n z = torch.atanh(torch.clamp(action, -1 + 1e-7, 1 - 1e-7))\n log_pi = m.log_prob(z)\n log_prob = log_pi.sum(axis=-1, keepdims=True)\n prob = torch.exp(log_prob)\n\n with torch.no_grad():\n mut, stdt = self.target_actor(state)\n mt = Normal(mut, stdt)\n zt = torch.atanh(torch.clamp(action, -1 + 1e-7, 1 - 1e-7))\n log_pit = mt.log_prob(zt)\n log_probt = log_pit.sum(axis=-1, keepdims=True)\n\n mu_old = mut\n std_old = stdt\n prob_t = torch.exp(log_probt)\n\n Qt_a = self.target_critic(state, action)\n\n next_mu, next_std = self.actor(next_state)\n mn = Normal(next_mu, next_std)\n zn = mn.sample(\n (self.num_sample,)\n ) # (num_sample, batch_size * len_tr, dim_action)\n next_action = torch.tanh(zn)\n\n Qt_next = self.target_critic(\n next_state.unsqueeze(0).repeat_interleave(self.num_sample, dim=0),\n 
next_action,\n ) # (num_sample, batch_size * len_tr, 1)\n\n c = torch.clip(prob / (prob_b + 1e-6), max=1.0)\n\n if self.critic_loss_type == \"1step_TD\":\n Qret = reward + self.gamma * (1 - done) * Qt_next.mean(axis=0)\n elif self.critic_loss_type == \"retrace\":\n Qret = reward + self.gamma * Qt_next.mean(axis=0) * (1 - done)\n\n # temporarily reshaping values\n # (batch_size * len_tr, item_dim) -> (batch_size, len_tr, item_dim)\n Qret = Qret.view(self.batch_size, -1, *Qret.shape[1:])\n Qt_a = Qt_a.view(self.batch_size, -1, *Qt_a.shape[1:])\n c = c.view(self.batch_size, -1, *c.shape[1:])\n done = done.view(self.batch_size, -1, *done.shape[1:])\n for i in reversed(range(Qret.shape[1] - 1)):\n Qret[:, i] += (\n self.gamma\n * c[:, i + 1]\n * (1 - done[:, i])\n * (Qret[:, i + 1] - Qt_a[:, i + 1])\n )\n Qret = Qret.view(-1, *Qret.shape[2:])\n\n zt_add = mt.sample(\n (self.num_sample,)\n ) # (num_sample, batch_size * len_tr, dim_action)\n action_add = torch.tanh(zt_add)\n log_pi_add = m.log_prob(zt_add)\n log_prob_add = log_pi_add.sum(axis=-1, keepdims=True)\n Qt_add = self.target_critic(\n state.unsqueeze(0).repeat_interleave(self.num_sample, dim=0), action_add\n )\n\n critic_loss = F.mse_loss(Q, Qret).mean()\n\n # Calculate Vt_add, At_add using Qt_add\n Vt_add = torch.mean(Qt_add, axis=0, keepdims=True)\n At_add = Qt_add - Vt_add\n At = At_add\n\n \"\"\" variational distribution q uses exp(At / eta) instead of exp(Qt / eta), for stable learning\"\"\"\n q = torch.softmax(At_add / self.eta, axis=0)\n actor_loss = -torch.mean(torch.sum(q.detach() * log_prob_add, axis=0))\n\n eta_loss = self.eta * self.eps_eta + self.eta * torch.mean(\n torch.log(torch.exp((At_add) / self.eta).mean(axis=0))\n )\n\n ss = 1.0 / (std ** 2) # (batch_size * len_tr, action_dim)\n ss_old = 1.0 / (std_old ** 2)\n\n \"\"\"\n KL-Divergence losses(related to alpha) implemented using methods introduced from V-MPO paper\n https://arxiv.org/abs/1909.12238\n \"\"\"\n\n # mu\n d_mu = mu - mu_old.detach() # (batch_size * len_tr, action_dim)\n KLD_mu = 0.5 * torch.sum(d_mu * 1.0 / ss_old.detach() * d_mu, axis=-1)\n mu_loss = torch.mean(\n self.alpha_mu * (self.eps_alpha_mu - KLD_mu.detach())\n + self.alpha_mu.detach() * KLD_mu\n )\n\n # sigma\n KLD_sigma = 0.5 * (\n torch.sum(1.0 / ss * ss_old.detach(), axis=-1)\n - ss.shape[-1]\n + torch.log(\n torch.prod(ss, axis=-1) / torch.prod(ss_old.detach(), axis=-1)\n )\n )\n sigma_loss = torch.mean(\n self.alpha_sigma * (self.eps_alpha_sigma - KLD_sigma.detach())\n + self.alpha_sigma.detach() * KLD_sigma\n )\n\n alpha_loss = mu_loss + sigma_loss\n\n else:\n pi = self.actor(state) # pi,Q: (batch_size, len_tr, dim_action)\n pi_next = self.actor(next_state)\n Q = self.critic(state)\n Q_a = Q.gather(1, action.long())\n\n with torch.no_grad():\n # calculate Q_ret using Retrace\n Qt = self.target_critic(state) # Q_target\n Qt_next = self.target_critic(next_state)\n pit = self.target_actor(state)\n\n Qt_a = Qt.gather(1, action.long())\n prob_t = pi.gather(\n 1, action.long()\n ) # (batch_size * len_tr, 1), target policy probability\n\n c = torch.clip(\n prob_t / (prob_b + 1e-6), max=1.0\n ) # (batch_size * len_tr, 1), prod of importance ratio and gamma\n\n if self.critic_loss_type == \"1step_TD\":\n Qret = reward + self.gamma * (1 - done) * torch.sum(\n pi_next * Qt_next, axis=-1, keepdim=True\n )\n elif self.critic_loss_type == \"retrace\":\n Qret = reward + self.gamma * torch.sum(\n pi_next * Qt_next, axis=-1, keepdim=True\n ) * (1 - done)\n\n # temporarily reshaping values\n # (batch_size * 
len_tr, item_dim) -> (batch_size, len_tr, item_dim)\n Qret = Qret.view(self.batch_size, -1, *Qret.shape[1:])\n Qt_a = Qt_a.view(self.batch_size, -1, *Qt_a.shape[1:])\n c = c.view(self.batch_size, -1, *c.shape[1:])\n done = done.view(self.batch_size, -1, *done.shape[1:])\n for i in reversed(\n range(Qret.shape[1] - 1)\n ): # along the trajectory length\n Qret[:, i] += (\n self.gamma\n * c[:, i + 1]\n * (Qret[:, i + 1] - Qt_a[:, i + 1])\n * (1 - done[:, i])\n )\n Qret = Qret.view(-1, *Qret.shape[2:])\n\n pi_old = pit\n\n critic_loss = F.mse_loss(Q_a, Qret).mean()\n\n # calculate V, Advantage of Qt\n Vt = torch.sum(pi_old * Qt, axis=-1, keepdims=True)\n At = Qt - Vt\n\n \"\"\" variational distribution q uses exp(At / eta) instead of exp(Qt / eta), for stable learning\"\"\"\n q = torch.softmax(At / self.eta, axis=-1)\n actor_loss = -torch.mean(torch.sum(q.detach() * torch.log(pi), axis=-1))\n\n eta_loss = self.eta * self.eps_eta + self.eta * torch.mean(\n torch.log(torch.sum(pi_old * torch.exp(At / self.eta), axis=-1))\n )\n\n \"\"\"\n KL-Divergence losses(related to alpha) implemented using methods introduced from V-MPO paper\n https://arxiv.org/abs/1909.12238\n \"\"\"\n\n KLD_pi = pi_old.detach() * (torch.log(pi_old.detach()) - torch.log(pi))\n KLD_pi = torch.sum(KLD_pi, axis=len(pi_old.shape) - 1)\n alpha_loss = torch.mean(\n self.alpha_mu * (self.eps_alpha_mu - KLD_pi.detach())\n + self.alpha_mu.detach() * KLD_pi\n )\n\n loss = critic_loss + actor_loss + eta_loss + alpha_loss\n\n self.actor_optimizer.zero_grad()\n self.critic_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.clip_grad_norm)\n torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.clip_grad_norm)\n self.actor_optimizer.step()\n self.critic_optimizer.step()\n self.reset_lgr_muls()\n\n self.num_learn += 1\n\n result = {\n \"actor_loss\": actor_loss.item(),\n \"critic_loss\": critic_loss.item(),\n \"eta_loss\": eta_loss.item(),\n \"alpha_loss\": alpha_loss.item(),\n \"eta\": self.eta.item(),\n \"alpha_mu\": self.alpha_mu.item(),\n \"alpha_sigma\": self.alpha_sigma.item(),\n \"min_Q\": Q.detach().cpu().numpy().min(),\n \"max_Q\": Q.detach().cpu().numpy().max(),\n \"min_At\": At.detach().cpu().numpy().min(),\n \"max_At\": At.detach().cpu().numpy().max(),\n }\n\n return result\n\n # reset Lagrange multipliers: eta, alpha_{mu, sigma}\n def reset_lgr_muls(self):\n self.eta.data = torch.max(self.eta, self.min_eta)\n self.alpha_mu.data = torch.max(self.alpha_mu, self.min_alpha_mu)\n self.alpha_sigma.data = torch.max(self.alpha_sigma, self.min_alpha_sigma)\n\n def update_target(self):\n self.target_actor.load_state_dict(self.actor.state_dict())\n self.target_critic.load_state_dict(self.critic.state_dict())\n\n def save(self, path):\n print(f\"...Save model to {path}...\")\n torch.save(\n {\n \"actor\": self.actor.state_dict(),\n \"critic\": self.critic.state_dict(),\n \"actor_optimizer\": self.actor_optimizer.state_dict(),\n \"critic_optimizer\": self.critic_optimizer.state_dict(),\n },\n os.path.join(path, \"ckpt\"),\n )\n\n def load(self, path):\n print(f\"...Load model from {path}...\")\n checkpoint = torch.load(os.path.join(path, \"ckpt\"), map_location=self.device)\n self.actor.load_state_dict(checkpoint[\"actor\"])\n self.target_actor.load_state_dict(self.actor.state_dict())\n self.critic.load_state_dict(checkpoint[\"critic\"])\n self.target_critic.load_state_dict(self.critic.state_dict())\n self.actor_optimizer.load_state_dict(checkpoint[\"actor_optimizer\"])\n 
self.critic_optimizer.load_state_dict(checkpoint[\"critic_optimizer\"])\n\n def process(self, transitions, step):\n result = {}\n\n # Process per step\n self.memory.store(transitions)\n delta_t = step - self.time_t\n self.time_t = step\n\n if self.memory.size >= self.batch_size and self.time_t >= self.start_train_step:\n for i in range(self.n_epoch):\n result = self.learn()\n self.update_target()\n\n return result\n\n def sync_in(self, weights):\n self.actor.load_state_dict(weights)\n\n def sync_out(self, device=\"cpu\"):\n weights = self.actor.state_dict()\n for k, v in weights.items():\n weights[k] = v.to(device)\n sync_item = {\n \"weights\": weights,\n }\n return sync_item\n\n def interact_callback(self, transition):\n _transition = {}\n self.tmp_buffer.append(transition)\n if len(self.tmp_buffer) == self.n_step:\n for key in self.tmp_buffer[0].keys():\n _transition[key] = np.stack([t[key] for t in self.tmp_buffer], axis=1)\n\n return _transition\n" ]
[ [ "torch.mean", "torch.max", "torch.sum", "torch.multinomial", "torch.tanh", "torch.clip", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.softmax", "numpy.stack", "torch.tensor", "torch.prod", "torch.exp", "torch.nn.functional.mse_loss", "torch.log", "torch.distributions.Normal", "torch.clamp", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ding1119/BDCN-Fiber_Detect
[ "7f3db5210a1a87d02c7ef8e79038ba00a8e5ef62" ]
[ "test_image.py" ]
[ "import numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport time\nimport re\nimport os\nimport sys\nimport cv2\nimport bdcn\nfrom datasets.dataset import Data\nimport argparse\nimport cfg\nfrom matplotlib import pyplot as plt\nimport os\nimport os.path as osp\nfrom scipy.io import savemat\n\n\ndef make_dir(data_dir):\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n\ndef test(model, args):\n test_root = args.data_root\n if args.test_lst is not None:\n with open(osp.join(test_root, args.test_lst), 'r') as f:\n test_lst = f.readlines()\n test_lst = [x.strip() for x in test_lst]\n if ' ' in test_lst[0]:\n test_lst = [x.split(' ')[0] for x in test_lst]\n else:\n test_lst = os.listdir(test_root)\n print(test_lst[0])\n save_sideouts = 1\n if save_sideouts:\n for j in range(5):\n make_dir(os.path.join(save_dir, 's2d_'+str(k)))\n make_dir(os.path.join(save_dir, 'd2s_'+str(k)))\n mean_bgr = np.array([104.00699, 116.66877, 122.67892])\n save_dir = args.res_dir\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n if args.cuda:\n model.cuda()\n model.eval()\n start_time = time.time()\n all_t = 0\n for nm in test_lst:\n data = cv2.imread(test_root + '/' + nm + '.jpg')\n # print(os.path.join(test_root, nm))\n # data = cv2.resize(data, (data.shape[1]/2, data.shape[0]/2), interpolation=cv2.INTER_LINEAR)\n data = np.array(data, np.float32)\n data -= mean_bgr\n data = data.transpose((2, 0, 1))\n data = torch.from_numpy(data).float().unsqueeze(0)\n if args.cuda:\n data = data.cuda()\n data = Variable(data)\n t1 = time.time()\n out = model(data)\n if '/' in nm:\n nm = nm.split('/')[-1]\n if save_sideouts:\n out = [F.sigmoid(x).cpu().data.numpy()[0, 0, :, :] for x in out]\n k = 1\n for j in xrange(5):\n # savemat(osp.join(save_dir, 's2d_'+str(k), nm+'.mat'), {'prob': out[j]})\n cv2.imwrite(os.path.join(save_dir, 's2d_'+str(k), '%s.jpg'%nm[i]), 255-t*255)\n # savemat(osp.join(save_dir, 'd2s_'+str(k), nm+'.mat'), {'prob': out[j+5]})\n cv2.imwrite(os.path.join(save_dir, 'd2s_'+str(k), '%s.jpg'%nm), 255-255*t)\n k += 1\n else:\n out = [F.sigmoid(out[-1]).cpu().data.numpy()[0, 0, :, :]]\n if not os.path.exists(os.path.join(save_dir, 'fuse')):\n os.mkdir(os.path.join(save_dir, 'fuse'))\n cv2.imwrite(os.path.join(save_dir, 'fuse/%s.png'%nm.split('/')[-1].split('.')[0]), 255*out[-1])\n all_t += time.time() - t1\n print( all_t)\n print( 'Overall Time use: ', time.time() - start_time)\n\ndef main():\n import time\n print(time.localtime())\n args = parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n model = bdcn.BDCN(rate=args.rate)\n model.load_state_dict(torch.load('%s' % (args.model)))\n # print model.fuse.weight.data, model.fuse.bias.data\n print( model.fuse.weight.data)\n test(model, args)\n\ndef parse_args():\n parser = argparse.ArgumentParser('test BDCN')\n parser.add_argument('-c', '--cuda', action='store_true',\n help='whether use gpu to train network')\n parser.add_argument('-g', '--gpu', type=str, default='0',\n help='the gpu id to train net')\n parser.add_argument('-m', '--model', type=str, default='params/bdcn_10000.pth',\n help='the model to test')\n parser.add_argument('--res-dir', type=str, default='result',\n help='the dir to store result')\n parser.add_argument('--data-root', type=str, default='./')\n parser.add_argument('--test-lst', type=str, default=None)\n return parser.parse_args()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load", "torch.from_numpy", "torch.nn.functional.sigmoid", "numpy.array", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
graviraja/seq2seq
[ "33c9eddd6386cea8bb8b3fd068f7a0d57b5bfa78" ]
[ "transformer.py" ]
[ "'''This code contains the implementation of the paper Attention is all you need.\n\nPaper: https://arxiv.org/pdf/1706.03762.pdf\nReference code: https://github.com/bentrevett/pytorch-seq2seq\n\nRelated Theory Blog post: https://graviraja.github.io/transformer/\nRelated Implemetation Blog post: https://graviraja.github.io/transformerimp/\nColab link: https://colab.research.google.com/drive/1695mi3IBaubysLCn6SwoG8LCXK4fb8aW\n'''\nimport os\nimport math\nimport time\nimport spacy\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport torchtext\nfrom torchtext.datasets import TranslationDataset, Multi30k\nfrom torchtext.data import Field, BucketIterator\n\nSEED = 1\nrandom.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\nspacy_de = spacy.load('de')\nspacy_en = spacy.load('en')\n\n\ndef tokenize_de(text):\n \"\"\"\n Tokenizes German text from a string into a list of strings\n \"\"\"\n return [tok.text for tok in spacy_de.tokenizer(text)]\n\n\ndef tokenize_en(text):\n \"\"\"\n Tokenizes English text from a string into a list of strings\n \"\"\"\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\n\nSRC = Field(tokenize=tokenize_de, init_token='<sos>', eos_token='<eos>', lower=True, batch_first=True)\nTRG = Field(tokenize=tokenize_en, init_token='<sos>', eos_token='<eos>', lower=True, batch_first=True)\n\ntrain_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(SRC, TRG))\nprint('Loaded data...')\n\nprint(f\"Number of training examples: {len(train_data.examples)}\")\nprint(f\"Number of validation examples: {len(valid_data.examples)}\")\nprint(f\"Number of testing examples: {len(test_data.examples)}\")\n\nprint(f\"src: {vars(train_data.examples[0])['src']}\")\nprint(f\"trg: {vars(train_data.examples[0])['trg']}\")\n\nSRC.build_vocab(train_data, min_freq=2)\nTRG.build_vocab(train_data, min_freq=2)\nprint('Vocab builded...')\n\nprint(f\"Unique tokens in source (de) vocabulary: {len(SRC.vocab)}\")\nprint(f\"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}\")\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nBATCH_SIZE = 128\n\ntrain_iterator, valid_iterator, test_iterator = BucketIterator.splits(\n (train_data, valid_data, test_data),\n batch_size=BATCH_SIZE,\n device=device)\n\n\nclass SelfAttention(nn.Module):\n '''This class implements the Multi-Head attention.\n\n Args:\n hid_dim: A integer indicating the hidden dimension.\n n_heads: A integer indicating the number of self attention heads.\n dropout: A float indicating the amount of dropout.\n device: A device to use.\n '''\n def __init__(self, hid_dim, n_heads, dropout, device):\n super().__init__()\n\n self.hid_dim = hid_dim\n self.n_heads = n_heads\n assert hid_dim % n_heads == 0, \"Number of heads must be a factor of model dimension\"\n # in paper, hid_dim = 512, n_heads = 8\n\n # query, key, value weight matrices\n self.w_q = nn.Linear(hid_dim, hid_dim)\n self.w_k = nn.Linear(hid_dim, hid_dim)\n self.w_v = nn.Linear(hid_dim, hid_dim)\n\n self.do = nn.Dropout(dropout)\n\n # linear layer to applied after concating the attention head outputs.\n self.fc = nn.Linear(hid_dim, hid_dim)\n\n # scale factor to be applied in calculation of self attention.\n self.scale = torch.sqrt(torch.FloatTensor([hid_dim // n_heads])).to(device)\n\n def forward(self, query, key, value, mask=None):\n # query => [batch_size, sent_len, hidden_dim]\n # key => 
[batch_size, sent_len, hidden_dim]\n # value => [batch_size, sent_len, hidden_dim]\n\n batch_size = query.shape[0]\n hidden_dim = query.shape[2]\n assert self.hid_dim == hidden_dim, \"Hidden dimensions must match\"\n\n Q = self.w_q(query)\n K = self.w_k(key)\n V = self.w_v(value)\n # Q, K, V => [batch_size, sent_len, hidden_dim]\n\n Q = Q.view(batch_size, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)\n K = K.view(batch_size, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)\n V = V.view(batch_size, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)\n # Q, K, V => [batch_size, n_heads, sent_len, hid_dim//n_heads]\n\n # z = softmax[(Q.K)/sqrt(q_dim)].V\n # Q => [batch_size, n_heads, sent_len, hid_dim//n_heads]\n # K => [batch_size, n_heads, hid_dim//n_heads, sent_len]\n # Q.K => [batch_size, n_heads, sent_len, sent_len]\n energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale\n # energy => [batch_size, n_heads, sent_len, sent_len]\n\n if mask is not None:\n energy = energy.masked_fill(mask == 0, -1e10)\n attention = self.do(F.softmax(energy, dim=-1))\n # attention => [batch_size, n_heads, sent_len, sent_len]\n\n x = torch.matmul(attention, V)\n # x => [batch_size, n_heads, sent_len, hid_dim // n_heads]\n x = x.permute(0, 2, 1, 3).contiguous()\n # x => [batch_size, sent_len, n_heads, hid_dim // n_heads]\n\n # combine all heads\n x = x.view(batch_size, -1, self.hid_dim)\n\n x = self.fc(x)\n # x => [batch_size, sent_len, hid_dim]\n return x\n\n\nclass PositionwiseFeedforward(nn.Module):\n '''This class implements the Position Wise Feed forward Layer.\n\n This will be applied after the multi-head attention layer.\n\n Args:\n hid_dim: A integer indicating the hidden dimension of model.\n pf_dim: A integer indicating the position wise feed forward layer hidden dimension.\n dropout: A float indicating the amount of dropout.\n '''\n def __init__(self, hid_dim, pf_dim, dropout):\n super().__init__()\n\n self.hid_dim = hid_dim\n self.pf_dim = pf_dim # 2048 in paper\n\n # self.fc_1 = nn.Linear(hid_dim, pf_dim)\n # self.fc_2 = nn.Linear(pf_dim, hid_dim)\n\n self.fc_1 = nn.Conv1d(hid_dim, pf_dim, 1)\n self.fc_2 = nn.Conv1d(pf_dim, hid_dim, 1)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n # x => [batch_size, sent_len, hidden_dim]\n\n x = x.permute(0, 2, 1)\n # x => [batch_size, hidden_dim, sent_len]\n\n x = self.dropout(F.relu(self.fc_1(x)))\n # x => [batch_size, pf_dim, sent_len]\n\n x = self.fc_2(x)\n # x => [batch_size, hidden_dim, sent_len]\n\n x = x.permute(0, 2, 1)\n # x => [batch_size, sent_len, hidden_dim]\n\n return x\n\n\nclass EncoderLayer(nn.Module):\n '''This is the single encoding layer module.\n\n '''\n def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device):\n super().__init__()\n\n self.sa = self_attention(hid_dim, n_heads, dropout, device)\n self.pf = positionwise_feedforward(hid_dim, pf_dim, dropout)\n self.ln = nn.LayerNorm(hid_dim)\n self.do = nn.Dropout(dropout)\n\n def forward(self, src, src_mask):\n # src => [batch_size, sent_len, hid_dim]\n # src_mask => [batch_size, sent_len]\n\n # apply the self attention layer for the src, then add the src(residual), and then apply layer normalization\n src = self.ln(src + self.do(self.sa(src, src, src, src_mask)))\n\n # apply the self positionwise_feedforward layer for the src, then add the src(residual), and then apply layer normalization\n src = self.ln(src + self.do(self.pf(src)))\n return src\n\n\nclass 
PositionalEncoding(nn.Module):\n '''Implement the PE function.\n\n Args:\n d_model: A integer indicating the hidden dimension of model.\n dropout: A float indicating the amount of dropout.\n device: A device to use.\n max_len: A integer indicating the maximum number of positions for positional encoding.\n '''\n def __init__(self, d_model, dropout, device, max_len=1000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, d_model).to(device)\n position = torch.arange(0.0, max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0.0, d_model, 2) * -(math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n # x => [batch_size, seq_len, hidden_dim]\n\n x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n return self.dropout(x)\n\n\nclass Encoder(nn.Module):\n '''This is the complete Encoder Module.\n\n It stacks multiple Encoderlayers on top of each other.\n\n Args:\n input_dim: A integer indicating the input vocab size.\n hid_dim: A integer indicating the hidden dimension of the model.\n n_layers: A integer indicating the number of encoder layers in the encoder.\n n_heads: A integer indicating the number of self attention heads.\n pf_dim: A integer indicating the hidden dimension of positionwise feedforward layer.\n encoder_layer: EncoderLayer class.\n self_attention: SelfAttention Layer class.\n positionwise_feedforward: PositionwiseFeedforward Layer class.\n positional_encoding: A Positional Encoding class.\n dropout: A float indicating the amount of dropout.\n device: A device to use.\n '''\n def __init__(self, input_dim, hid_dim, n_layers, n_heads, pf_dim, encoder_layer, self_attention, positionwise_feedforward, positional_encoding, dropout, device):\n super().__init__()\n\n self.input_dim = input_dim\n self.hid_dim = hid_dim\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.pf_dim = pf_dim\n self.encoder_layer = encoder_layer\n self.self_attention = self_attention\n self.positionwise_feedforward = positionwise_feedforward\n self.poistional_encoding = positional_encoding\n self.device = device\n\n self.tok_embedding = nn.Embedding(input_dim, hid_dim)\n self.pos_embedding = nn.Embedding(1000, hid_dim) # alternate way of positional encoding\n\n # Encoder Layers\n self.layers = nn.ModuleList([encoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device) for _ in range(n_layers)])\n self.dropout = nn.Dropout(dropout)\n self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)\n\n def forward(self, src, src_mask):\n # src => [batch_size, sent_len]\n # src_mask => [batch_size, 1, 1, sent_len]\n\n src = self.dropout((self.tok_embedding(src) * self.scale))\n src = self.poistional_encoding(src)\n # src => [batch_size, sent_len, hid_dim]\n\n for layer in self.layers:\n src = layer(src, src_mask)\n\n return src\n\n\nclass DecoderLayer(nn.Module):\n '''This is the single Decoder Layer Module.\n\n Args:\n hid_dim: A integer indicating the hidden dimension of the model.\n n_heads: A integer indicating the number of self attention heads.\n pf_dim: A integer indicating the hidden dimension of positionwise feedforward layer.\n self_attention: SelfAttention class\n positionwise_feedforward: PositionwiseFeedforward Class.\n dropout: A float indicating the amount of dropout.\n 
device: A device to use.\n '''\n def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device):\n super().__init__()\n\n self.sa = self_attention(hid_dim, n_heads, dropout, device)\n self.ea = self_attention(hid_dim, n_heads, dropout, device)\n self.pf = positionwise_feedforward(hid_dim, pf_dim, dropout)\n self.ln = nn.LayerNorm(hid_dim)\n self.do = nn.Dropout(dropout)\n\n def forward(self, trg, src, trg_mask, src_mask):\n # trg => [batch_size, trg_len, hid_dim]\n # src => [batch_size, src_len, hid_dim]\n # trg_mask => [batch_size, 1, trg_len, trg_len]\n # src_maks => [batch_size, 1, 1, src_len]\n\n # self attention is calculated with the target\n trg = self.ln(trg + self.do(self.sa(trg, trg, trg, trg_mask)))\n\n # encoder attention is calculated with src as key, values and trg as query.\n trg = self.ln(trg + self.do(self.ea(trg, src, src, src_mask)))\n\n # positionwise feed forward layer of the decoder\n trg = self.ln(trg + self.do(self.pf(trg)))\n\n # trg => [batch_size, trg_len, batch_size]\n return trg\n\n\nclass Decoder(nn.Module):\n '''This is the complete Decoder Module.\n\n It stacks multiple Decoderlayers on top of each other.\n\n Args:\n output_dim: A integer indicating the output vocab size.\n hid_dim: A integer indicating the hidden dimension of the model.\n n_layers: A integer indicating the number of encoder layers in the encoder.\n n_heads: A integer indicating the number of self attention heads.\n pf_dim: A integer indicating the hidden dimension of positionwise feedforward layer.\n decoder_layer: DecoderLayer class.\n self_attention: SelfAttention Layer class.\n positional_encoding: A Postional Encoding class.\n positionwise_feedforward: PositionwiseFeedforward Layer class.\n dropout: A float indicating the amount of dropout.\n device: A device to use.\n '''\n def __init__(self, output_dim, hid_dim, n_layers, n_heads, pf_dim, decoder_layer, self_attention, positionwise_feedforward, positional_encoding, dropout, device):\n super().__init__()\n\n self.output_dim = output_dim\n self.hid_dim = hid_dim\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.pf_dim = pf_dim\n self.decoder_layer = decoder_layer\n self.self_attention = self_attention\n self.positionwise_feedforward = positionwise_feedforward\n self.positional_encoding = positional_encoding\n self.device = device\n\n self.tok_embedding = nn.Embedding(output_dim, hid_dim)\n self.pos_embedding = nn.Embedding(1000, hid_dim) # alternate way of positional encoding\n\n self.layers = nn.ModuleList([decoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device) for _ in range(n_layers)])\n self.fc = nn.Linear(hid_dim, output_dim)\n self.do = nn.Dropout(dropout)\n self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)\n\n def forward(self, trg, src, trg_mask, src_mask):\n # trg => [batch_size, trg_len]\n # src => [batch_size, src_len, hidden_dim]\n # trg_mask => [batch_size, 1, trg_len, trg_len]\n # src_mask => [batch_size, 1, 1, src_len]\n\n trg = self.do((self.tok_embedding(trg)) * self.scale)\n trg = self.positional_encoding(trg)\n # trg => [batch_size, trg_len, hid_dim]\n\n for layer in self.layers:\n trg = layer(trg, src, trg_mask, src_mask)\n\n trg = self.fc(trg)\n # trg => [batch_size, trg_len, output_dim]\n return trg\n\n\nclass Transformer(nn.Module):\n def __init__(self, encoder, decoder, pad_idx, device):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n self.pad_idx = pad_idx\n self.device = 
device\n\n def make_masks(self, src, trg):\n # src => [batch_size, src_len]\n # trg => [batch_size, trg_len]\n\n src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2)\n trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3)\n\n trg_len = trg.shape[1]\n trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), dtype=torch.uint8, device=self.device))\n\n trg_mask = trg_pad_mask & trg_sub_mask\n\n return src_mask, trg_mask\n\n def forward(self, src, trg):\n # src => [batch_size, src_len]\n # trg => [batch_size, trg_len]\n\n src_mask, trg_mask = self.make_masks(src, trg)\n\n enc_src = self.encoder(src, src_mask)\n # enc_src => [batch_size, sent_len, hid_dim]\n\n out = self.decoder(trg, enc_src, trg_mask, src_mask)\n # out => [batch_size, trg_len, output_dim]\n\n return out\n\n\ninput_dim = len(SRC.vocab)\noutput_dim = len(TRG.vocab)\nhid_dim = 512\nn_layers = 6\nn_heads = 8\npf_dim = 2048\ndropout = 0.1\npad_idx = SRC.vocab.stoi['<pad>']\n\nPE = PositionalEncoding(hid_dim, dropout, device)\nenc = Encoder(input_dim, hid_dim, n_layers, n_heads, pf_dim, EncoderLayer, SelfAttention, PositionwiseFeedforward, PE, dropout, device)\ndec = Decoder(output_dim, hid_dim, n_layers, n_heads, pf_dim, DecoderLayer, SelfAttention, PositionwiseFeedforward, PE, dropout, device)\nmodel = Transformer(enc, dec, pad_idx, device).to(device)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\nprint(f\"The model has {count_parameters(model) } trainable parameters\")\n\nfor p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n\nclass NoamOpt:\n \"Optim wrapper that implements rate.\"\n def __init__(self, model_size, factor, warmup, optimizer):\n self.optimizer = optimizer\n self._step = 0\n self.warmup = warmup\n self.factor = factor\n self.model_size = model_size\n self._rate = 0\n\n def step(self):\n \"Update parameters and rate\"\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n\n def rate(self, step=None):\n \"Implement `lrate` above\"\n if step is None:\n step = self._step\n return self.factor * \\\n (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))\n\noptimizer = NoamOpt(hid_dim, 1, 2000, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\ncriterion = nn.CrossEntropyLoss(ignore_index=pad_idx)\n\n\ndef train(model, iterator, optimizer, criterion, clip):\n\n model.train()\n\n epoch_loss = 0\n\n for i, batch in enumerate(iterator):\n\n src = batch.src\n trg = batch.trg\n\n optimizer.optimizer.zero_grad()\n\n output = model(src, trg[:, :-1])\n\n # output = [batch size, trg sent len - 1, output dim]\n # trg = [batch size, trg sent len]\n\n output = output.contiguous().view(-1, output.shape[-1])\n trg = trg[:, 1:].contiguous().view(-1)\n\n # output = [batch size * trg sent len - 1, output dim]\n # trg = [batch size * trg sent len - 1]\n\n loss = criterion(output, trg)\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n optimizer.step()\n\n epoch_loss += loss.item()\n\n return epoch_loss / len(iterator)\n\n\ndef evaluate(model, iterator, criterion):\n\n model.eval()\n\n epoch_loss = 0\n\n with torch.no_grad():\n\n for i, batch in enumerate(iterator):\n\n src = batch.src\n trg = batch.trg\n\n output = model(src, trg[:, :-1])\n\n # output = [batch size, trg sent len - 1, output dim]\n # trg = [batch size, trg sent len]\n\n output = 
output.contiguous().view(-1, output.shape[-1])\n trg = trg[:, 1:].contiguous().view(-1)\n\n # output = [batch size * trg sent len - 1, output dim]\n # trg = [batch size * trg sent len - 1]\n\n loss = criterion(output, trg)\n\n epoch_loss += loss.item()\n\n return epoch_loss / len(iterator)\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\nN_EPOCHS = 10\nCLIP = 1\nSAVE_DIR = '.'\nMODEL_SAVE_PATH = os.path.join(SAVE_DIR, 'transformer-seq2seq.pt')\n\nbest_valid_loss = float('inf')\n\nif not os.path.isdir(f'{SAVE_DIR}'):\n os.makedirs(f'{SAVE_DIR}')\n\nfor epoch in range(N_EPOCHS):\n\n start_time = time.time()\n\n train_loss = train(model, train_iterator, optimizer, criterion, CLIP)\n valid_loss = evaluate(model, valid_iterator, criterion)\n\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), MODEL_SAVE_PATH)\n\n print(f'| Epoch: {epoch+1:03} | Time: {epoch_mins}m {epoch_secs}s| Train Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f} | Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f} |')\n\nmodel.load_state_dict(torch.load(MODEL_SAVE_PATH))\n\ntest_loss = evaluate(model, test_iterator, criterion)\n\nprint(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')\n" ]
[ [ "torch.nn.functional.softmax", "torch.load", "torch.sin", "torch.zeros", "torch.nn.Embedding", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.ones", "torch.arange", "torch.cos", "torch.nn.Linear", "torch.nn.Conv1d", "torch.manual_seed", "torch.nn.LayerNorm", "torch.matmul", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sizaif/tls-diff-testing
[ "4eae4e8de6ebd1b791e32fca44b8189b04a2ea10" ]
[ "tls-diff-testing/apps/python_plot_cov/plot_cov.py" ]
[ "#!/usr/bin/python3.8\n\nimport sys\nimport matplotlib.pyplot as plot\nimport itertools\nimport random\nfrom bitarray import bitarray\n\n\n#\n# _____________________________________________________________________________\n#\ndef main(argv):\n\n files1 = ['/home/walz/data/tls-diff-testing/stimuli-20170511-the-man-tools/iteration-{0:03}/stimuli.hex.accepted.coverage'.format(i) for i in range(10)]\n\n files2 = ['/home/walz/data/tls-diff-testing/stimuli-20170510-afl/stimuli_{0:08}.txt.accepted.coverage'.format(i) for i in range(1, 11)]\n\n files3 = ['/home/walz/data/tls-diff-testing/stimuli-20170511-TLSAttacker/iteration-{0:03}/stimuli_phase{1}.hex.accepted.coverage'.format(i, j) for i, j in itertools.product(range(7), range(1, 4))]\n\n plotCoverage(readCoverage(files1), 'red')\n plotCoverage(readCoverage(files2), 'blue')\n plotCoverage(readCoverage(files3), 'green')\n\n plot.xscale('log')\n\n plot.show()\n\n\n#\n# _____________________________________________________________________________\n#\ndef plotCoverage(coverage, color):\n\n accu = None\n\n X = range(len(coverage) + 1)\n Y = [0]\n\n for cov in coverage:\n if accu is None:\n accu = bitarray(len(cov))\n elif len(accu) != len(cov):\n print('Inconsistent lengths!!')\n return \n accu = accu | cov\n Y += [accu.count(True)]\n\n plot.plot(X, Y, color=color)\n\n\n#\n# _____________________________________________________________________________\n#\ndef readCoverage(filenames, nmax=None):\n\n coverage = []\n\n# left, right = 0, 479\n# left, right = 479, 1320\n# left, right = 1320, 1614\n left, right = 479, 1614\n\n for filename in filenames:\n print('Reading file {0} ...'.format(filename))\n for line in open(filename):\n (myid, mycov) = line.split(':')[:2]\n coverage.append(bitarray(mycov.strip()[left:right]))\n\n print('Read {0} coverage entries!'.format(len(coverage)))\n\n return coverage\n\n\n#\n# _____________________________________________________________________________\n#\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.xscale", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martagaldys/MACT20.21_Digital_tools_Big_Data_part_1
[ "06acbaba5509f46bce2d5da5d987351f854c149a" ]
[ "Marta_Galdys/parrot/parrot_.py" ]
[ "\"\"\"\nImage-colored wordcloud with boundary map\n=========================================\nA slightly more elaborate version of an image-colored wordcloud\nthat also takes edges in the image into account.\nRecreating an image similar to the parrot example.\n\"\"\"\n\nimport os\nfrom PIL import Image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import gaussian_gradient_magnitude\n\nfrom wordcloud import WordCloud, ImageColorGenerator\n\n# get data directory (using getcwd() is needed to support running example in generated IPython notebook)\nd = os.path.dirname(__file__) if \"__file__\" in locals() else os.getcwd()\n\n# load wikipedia text on rainbow\ntext = open('../data/test.txt').read()\ntext = text.upper()\n\n# load image. This has been modified in gimp to be brighter and have more saturation.\nparrot_color = np.array(Image.open(os.path.join(d, \"parrot-by-jose-mari-gimenez2.jpg\")))\n# subsample by factor of 3. Very lossy but for a wordcloud we don't really care.\nparrot_color = parrot_color[::3, ::3]\n\n# create mask white is \"masked out\"\nparrot_mask = parrot_color.copy()\nparrot_mask[parrot_mask.sum(axis=2) == 0] = 255\n\n# some finesse: we enforce boundaries between colors so they get less washed out.\n# For that we do some edge detection in the image\nedges = np.mean([gaussian_gradient_magnitude(parrot_color[:, :, i] / 255., 2) for i in range(3)], axis=0)\nparrot_mask[edges > .08] = 255\n\n# create wordcloud. A bit sluggish, you can subsample more strongly for quicker rendering\n# relative_scaling=0 means the frequencies in the data are reflected less\n# acurately but it makes a better picture\nwc = WordCloud(max_words=2000, mask=parrot_mask, max_font_size=40, random_state=42, relative_scaling=0)\n\n# generate word cloud\nwc.generate(text)\nplt.imshow(wc)\n\n# create coloring from image\nimage_colors = ImageColorGenerator(parrot_color)\nwc.recolor(color_func=image_colors)\nplt.figure(figsize=(10, 10))\nplt.imshow(wc, interpolation=\"bilinear\")\nwc.to_file(\"parrot_new.png\")\n\nplt.figure(figsize=(10, 10))\nplt.title(\"Original Image\")\nplt.imshow(parrot_color)\n\nplt.figure(figsize=(10, 10))\nplt.title(\"Edge map\")\nplt.imshow(edges)\nplt.show()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "scipy.ndimage.gaussian_gradient_magnitude", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
longtailfinancial/Options-Project
[ "e8ed6c293e21b6d8d4d52343f2817ea0cbe660d9" ]
[ "src/scrape_fun.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport sys\r\nfrom pathlib import Path\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef get_current_price(stock_of_interest, questrade_instance, api_key):\r\n \"\"\"\r\n Retrieves the current price of a stock using the questrade_api package.\r\n If questrade server is down, Alphavantage is used as backup. Note that Alphavantage\r\n prices are less accurate and do not track pre and post market.\r\n\r\n :param stock_of_interest: ticker symbol (string)\r\n :param questrade_instance: Questrade instance from 'questrade_api'\r\n :param api_key: API key used to access the Alphavantage server (string)\r\n :return: current price of the ticker (float)\r\n \"\"\"\r\n try:\r\n stock_id = questrade_instance.symbols_search(prefix=stock_of_interest)['symbols'][0]['symbolId']\r\n price = questrade_instance.markets_quote(stock_id)['quotes'][0]['lastTradePrice']\r\n if price is None:\r\n raise Exception\r\n except Exception:\r\n print(\"Could not retrieve price from Questrade API, attempting Alphavantage instead!\")\r\n try:\r\n price = float(pd.read_json(\"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=\" +\r\n stock_of_interest + '&apikey=' + api_key).loc[\"05. price\"])\r\n except Exception:\r\n print('Could not retrieve price from Alphavantage. Please ensure ticker symbol exists!')\r\n sys.exit(1)\r\n return price\r\n\r\n\r\ndef retrieve_price_history(stock_of_interest, api_key, save_path):\r\n \"\"\"\r\n Retrieves historical daily closing price of a given ticker from the Alphavantage API.\r\n Adjusts ticker price and dividend payout accordingly to forward/reverse splits.\r\n Returns matrix with: date, adjusted closing price, adjusted dividend payout, and split factor\r\n Checks and appends the prices to local version of ticker history is present.\r\n\r\n :param stock_of_interest: ticker symbol (string)\r\n :param api_key: API key used to access the Alphavantage server (string)\r\n :param save_path: Path used to save data (string)\r\n :return: adjusted daily closing price and dividend payouts of the ticker (DataFrame)\r\n \"\"\"\r\n\r\n # Default parameters\r\n split_multiplier = 1\r\n data_url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=' \\\r\n + stock_of_interest + '&outputsize=full&apikey=' + api_key\r\n\r\n # Actual data starts at row 5\r\n history_data = pd.read_json(data_url)['Time Series (Daily)'].iloc[5:].reset_index()\r\n # Reset index to be datetime format\r\n history_data['index'] = pd.to_datetime(history_data['index'])\r\n # Create DataFrame to store data\r\n my_history = np.zeros((len(history_data), 4), dtype=object)\r\n # Extracting information we need\r\n for n in range(len(history_data)):\r\n # Check if stock forward/reverse split happened the following day (data is present -> past)\r\n # n > 0 since we are calling 'n - 1'\r\n if n > 0:\r\n temp_multiplier = float(history_data.iloc[n - 1, 1]['8. split coefficient'])\r\n if temp_multiplier != 1:\r\n split_multiplier = split_multiplier * temp_multiplier\r\n # Update DataFrame\r\n my_history[n] = [history_data.iloc[n, 0],\r\n round((float(history_data.iloc[n, 1]['4. close']) / split_multiplier), 3),\r\n round((float(history_data.iloc[n, 1]['7. 
dividend amount']) / split_multiplier), 3),\r\n round(split_multiplier, 3)]\r\n # Reverse list such that the oldest price is first\r\n my_history = my_history[::-1]\r\n # Removes the last row if that is the current date\r\n if my_history[-1, 0].date() == pd.to_datetime(\"today\").date():\r\n my_history = my_history[:-1, :]\r\n\r\n # Convert to DataFrame and store\r\n my_history_df = pd.DataFrame(data=my_history,\r\n columns=[\"date\", \"close\", \"dividend amount\", \"adjustment factor\"])\r\n\r\n # Create data directory if it doesn't exist\r\n Path(save_path).mkdir(exist_ok=True)\r\n\r\n try:\r\n old_data = pd.read_csv(os.path.abspath(os.path.join(save_path, stock_of_interest)) + \".csv\")\r\n old_data[\"date\"] = pd.to_datetime(old_data[\"date\"])\r\n # Sometimes there are rounding errors when using \"read_csv\"\r\n old_data[\"close\"] = round(old_data[\"close\"], 3)\r\n old_data[\"dividend amount\"] = round(old_data[\"dividend amount\"], 3)\r\n old_data[\"adjustment factor\"] = round(old_data[\"adjustment factor\"], 3)\r\n my_history_df = pd.concat([old_data, my_history_df], ignore_index=True).drop_duplicates().reset_index(drop=True)\r\n if len(my_history_df['date']) != len(set(my_history_df['date'])):\r\n # Discrepancies are appended to the end of the concatenation\r\n my_history_df = my_history_df.sort_values(by=[\"date\"])\r\n print(my_history_df[my_history_df.duplicated(subset=[\"date\"],\r\n keep=False)])\r\n raise Exception(\"Discrepancies between old and new files. This could be due to forward/reverse splits.\"\r\n \"Data not updated, please manually fix!\")\r\n else:\r\n my_history_df.to_csv(path_or_buf=(save_path + stock_of_interest + '.csv'),\r\n index=False)\r\n except FileNotFoundError:\r\n print(\"Local history of ticker '\" + stock_of_interest + \"' does not exist, created new file.\")\r\n my_history_df.to_csv(path_or_buf=(os.path.abspath(os.path.join(save_path, stock_of_interest)) + \".csv\"),\r\n index=False)\r\n\r\n return my_history_df\r\n\r\n\r\ndef hist_option_data(stock_of_interest, option_data_path, history_df):\r\n \"\"\"\r\n This function aims to aggregate and adjust all option data for a given ticker.\r\n Adjustment is made based on historical splits. For example, had a stock undergone\r\n a forward split of factor 2, all previous strike price, ask/bid price ...etc are\r\n divided by 2. 
On the flip side, ask/bid size, volume ...etc are multiplied by 2.\r\n Function has to be modified depending on structure of options data.\r\n\r\n :param stock_of_interest: ticker symbol (string)\r\n :param option_data_path: path where all the option data files are stored (string)\r\n :param history_df: historical end of day prices for ticker (DataFrame)\r\n :return: options_df: all ticker specific options data (DataFrame)\r\n \"\"\"\r\n options_df = pd.DataFrame()\r\n\r\n # Nested loading of options data\r\n my_years = [year for year in os.listdir(option_data_path) if not year.startswith(\".\")]\r\n for year in tqdm(my_years, desc=\"year\"):\r\n my_months = [month for month in os.listdir(os.path.join(option_data_path, year)) if not month.startswith(\".\")]\r\n for month in tqdm(my_months, desc=\"month\"):\r\n my_days = [day for day in os.listdir(os.path.join(option_data_path, year, month)) if\r\n not day.startswith(\".\")]\r\n for day in tqdm(my_days, desc=\"day\"):\r\n try:\r\n daily_option_data = pd.read_csv(os.path.abspath(os.path.join(option_data_path, year, month, day)))\r\n except FileNotFoundError:\r\n raise SystemExit(\"Option data for \" + stock_of_interest + \" not found in path:\" +\r\n os.path.abspath(os.path.join(option_data_path, year, month, day)))\r\n # Filtering for the right symbol\r\n temp_data = daily_option_data[daily_option_data[\"Symbol\"] == stock_of_interest]\r\n # Dropping columns are reordering others\r\n temp_data = temp_data.drop(columns=[\"optionkey\", \"Symbol\", \"UnderlyingPrice\"])[[\r\n \"DataDate\", \"ExpirationDate\", \"PutCall\", \"StrikePrice\", \"AskPrice\",\r\n \"AskSize\", \"BidPrice\", \"BidSize\", \"LastPrice\", \"Volume\", \"openinterest\"]]\r\n # Change to datetime\r\n temp_data[\"DataDate\"] = pd.to_datetime(temp_data[\"DataDate\"]).dt.date\r\n temp_data[\"ExpirationDate\"] = pd.to_datetime(temp_data[\"ExpirationDate\"]).dt.date\r\n if len(np.unique(temp_data[\"DataDate\"])) == 1:\r\n temp_day = np.unique(temp_data[\"DataDate\"])[0]\r\n else:\r\n raise SystemExit(\"More than one unique day in each file! 
Discount Options Data seriously bugged :/\")\r\n # Retrieve adjustment factor and closing price\r\n temp_df = history_df[history_df[\"date\"] == temp_day][[\"adjustment factor\", \"close\"]]\r\n temp_adjustment_factor = float(temp_df[\"adjustment factor\"])\r\n temp_closing_price = float(temp_df[\"close\"])\r\n # Adjusting option data as needed\r\n temp_data[[\"StrikePrice\", \"AskPrice\", \"BidPrice\", \"LastPrice\"]] = \\\r\n temp_data[[\r\n \"StrikePrice\", \"AskPrice\", \"BidPrice\", \"LastPrice\"]] / temp_adjustment_factor\r\n temp_data[[\"AskSize\", \"BidSize\", \"Volume\", \"openinterest\"]] = \\\r\n temp_data[[\"AskSize\", \"BidSize\", \"Volume\", \"openinterest\"]] * temp_adjustment_factor\r\n temp_data[\"closing price\"] = temp_closing_price\r\n # Append to DataFrame\r\n options_df = options_df.append(temp_data).reset_index(drop=True)\r\n\r\n # Renaming columns\r\n options_df = options_df.rename(columns={\"DataDate\": \"date\",\r\n \"ExpirationDate\": \"expiration date\",\r\n \"PutCall\": \"type\",\r\n \"StrikePrice\": \"strike price\",\r\n \"AskPrice\": \"ask price\",\r\n \"AskSize\": \"ask size\",\r\n \"BidPrice\": \"bid price\",\r\n \"BidSize\": \"bid size\",\r\n \"LastPrice\": \"last price\",\r\n \"openinterest\": \"open interest\",\r\n \"Volume\": \"volume\"})\r\n\r\n options_df = options_df.sort_values(by=[\"date\", \"expiration date\", \"strike price\"]).reset_index(drop=True)\r\n return options_df\r\n\r\n\r\ndef add_dividends(stock_of_interest, options_df, history_df, dividends_data_path, save_path):\r\n \"\"\"\r\n This function appends the price contribution due to dividends for the \"posting\" and\r\n \"expiration\" dates of each option. This allows us to be one step closer to working\r\n with the \"true\" price of the security on those dates.\r\n\r\n :param stock_of_interest: ticker symbol (string)\r\n :param options_df: all option data for specified ticker (DataFrame)\r\n :param history_df: historical end of day prices for ticker (DataFrame)\r\n :param dividends_data_path: path where dividend data is stored (str)\r\n :param save_path: path to save final options data (str)\r\n :return: None\r\n \"\"\"\r\n # Load dividend data\r\n try:\r\n my_dividends_df = pd.read_csv(os.path.abspath(os.path.join(dividends_data_path, stock_of_interest)) + \"_ts.csv\")\r\n my_dividends_df[\"date\"] = pd.to_datetime(my_dividends_df[\"date\"]).dt.date\r\n except FileNotFoundError:\r\n raise SystemExit(\"Dividend data for \" + stock_of_interest + \" not found in path: \" +\r\n os.path.abspath(os.path.join(dividends_data_path, stock_of_interest)) + \"_ts.csv\")\r\n\r\n # Initialize empty containers\r\n my_date_div = pd.Series()\r\n my_exp_date_div = pd.Series()\r\n my_exp_closing = pd.Series()\r\n # adding dividend contribution info to DataFrame\r\n for my_date in sorted(options_df[\"date\"].unique()):\r\n temp_options_df = options_df[options_df[\"date\"] == my_date]\r\n date_div_price = float(my_dividends_df[my_dividends_df[\"date\"] == my_date][\"amount\"])\r\n my_date_div = my_date_div.append(pd.Series(np.ones(temp_options_df.shape[0]) * date_div_price)).reset_index(\r\n drop=True)\r\n for my_exp_date in sorted(temp_options_df[\"expiration date\"].unique()):\r\n exp_day_length = temp_options_df[temp_options_df[\"expiration date\"] == my_exp_date].shape[0]\r\n exp_day_div_price = float(my_dividends_df[my_dividends_df[\"date\"] == my_exp_date][\"amount\"])\r\n exp_day_closing_price = float(history_df[history_df[\"date\"] == my_exp_date][\"close\"])\r\n my_exp_date_div = 
my_exp_date_div.append(\r\n pd.Series(np.ones(exp_day_length) * exp_day_div_price)).reset_index(drop=True)\r\n my_exp_closing = my_exp_closing.append(\r\n pd.Series(np.ones(exp_day_length) * exp_day_closing_price)).reset_index(drop=True)\r\n\r\n # Adding additional columns\r\n options_df[\"exp date closing price\"] = my_exp_closing\r\n options_df[\"date div\"] = my_date_div\r\n options_df[\"exp date div\"] = my_exp_date_div\r\n\r\n # Save adjusted options data\r\n Path(save_path).mkdir(exist_ok=True)\r\n options_df.to_csv(path_or_buf=(os.path.abspath(os.path.join(save_path, stock_of_interest)) + \".csv\"),\r\n index=False)\r\n print(\"All adjusted option data for \" + stock_of_interest + \" has been aggregated and saved to path: \" +\r\n os.path.abspath(os.path.join(save_path, stock_of_interest)) + \".csv\")\r\n return\r\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.Series", "numpy.unique", "pandas.DataFrame", "numpy.ones", "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
blackmints/3DGCN
[ "550fa682c74bda5b13c421306a100d684082decc" ]
[ "model/dataset.py" ]
[ "from keras.utils import to_categorical, Sequence\nfrom rdkit import Chem\nfrom rdkit.Chem import rdmolops, AllChem\nimport numpy as np\n\n\ndef one_hot(x, allowable_set):\n # If x is not in allowed set, use last index\n if x not in allowable_set:\n x = allowable_set[-1]\n\n return list(map(lambda s: x == s, allowable_set))\n\n\nclass Dataset(object):\n def __init__(self, dataset, batch=128):\n self.dataset = dataset\n self.path = \"../../data/{}.sdf\".format(dataset)\n self.task = \"binary\"\n self.target_name = \"active\"\n self.max_atoms = 0\n\n self.batch = batch\n self.outputs = 1\n\n self.mols = []\n self.coords = []\n self.target = []\n self.x, self.c, self.y = {}, {}, {}\n\n self.use_atom_symbol = True\n self.use_degree = True\n self.use_hybridization = True\n self.use_implicit_valence = True\n self.use_partial_charge = False\n self.use_formal_charge = True\n self.use_ring_size = True\n self.use_hydrogen_bonding = True\n self.use_acid_base = True\n self.use_aromaticity = True\n self.use_chirality = True\n self.use_num_hydrogen = True\n\n # Load data\n self.load_dataset()\n\n # Calculate number of features\n mp = MPGenerator([], [], [], 1,\n use_atom_symbol=self.use_atom_symbol,\n use_degree=self.use_degree,\n use_hybridization=self.use_hybridization,\n use_implicit_valence=self.use_implicit_valence,\n use_partial_charge=self.use_partial_charge,\n use_formal_charge=self.use_formal_charge,\n use_ring_size=self.use_ring_size,\n use_hydrogen_bonding=self.use_hydrogen_bonding,\n use_acid_base=self.use_acid_base,\n use_aromaticity=self.use_aromaticity,\n use_chirality=self.use_chirality,\n use_num_hydrogen=self.use_num_hydrogen)\n self.num_features = mp.get_num_features()\n\n # Normalize\n if self.task == \"regression\":\n self.mean = np.mean(self.y[\"train\"])\n self.std = np.std(self.y[\"train\"])\n\n self.y[\"train\"] = (self.y[\"train\"] - self.mean) / self.std\n self.y[\"valid\"] = (self.y[\"valid\"] - self.mean) / self.std\n self.y[\"test\"] = (self.y[\"test\"] - self.mean) / self.std\n else:\n self.mean = 0\n self.std = 1\n\n def load_dataset(self):\n # Dataset parameters\n if self.dataset == \"bace_reg\" or self.dataset == \"delaney\" or self.dataset == \"freesolv\":\n self.task = \"regression\"\n self.target_name = \"target\"\n\n elif self.dataset == \"tox21\":\n self.target_name = \"NR-ER\"\n\n # elif self.dataset == \"tox21\": # Multitask tox21\n # self.target_name = [\"NR-Aromatase\", \"NR-AR\", \"NR-AR-LBD\", \"NR-ER\", \"NR-ER-LBD\", \"NR-PPAR-gamma\", \"NR-AhR\",\n # \"SR-ARE\", \"SR-ATAD5\", \"SR-HSE\", \"SR-MMP\", \"SR-p53\"]\n\n else:\n pass\n\n # Load file\n x, c, y = [], [], []\n mols = Chem.SDMolSupplier(self.path)\n\n for mol in mols:\n if mol is not None:\n # Multitask\n if type(self.target_name) is list:\n y.append([float(mol.GetProp(t)) if t in mol.GetPropNames() else -1 for t in self.target_name])\n self.outputs = len(self.target_name)\n\n # Singletask\n elif self.target_name in mol.GetPropNames():\n _y = float(mol.GetProp(self.target_name))\n if _y == -1:\n continue\n else:\n y.append(_y)\n\n else:\n continue\n\n x.append(mol)\n c.append(mol.GetConformer().GetPositions())\n assert len(x) == len(y)\n\n # Filter and update maximum number of atoms\n new_x, new_c, new_y = [], [], []\n if self.max_atoms > 0:\n for mol, coo, tar in zip(x, c, y):\n if mol.GetNumAtoms() <= self.max_atoms:\n new_x.append(mol)\n new_c.append(coo)\n new_y.append(tar)\n\n x = new_x\n c = new_c\n y = new_y\n\n else:\n for mol, tar in zip(x, y):\n self.max_atoms = max(self.max_atoms, 
mol.GetNumAtoms())\n\n if self.task != \"regression\":\n self.mols, self.coords, self.target = np.array(x), np.array(c), np.array(y, dtype=int)\n else:\n self.mols, self.coords, self.target = np.array(x), np.array(c), np.array(y)\n\n # Shuffle data\n idx = np.random.permutation(len(self.mols))\n self.mols, self.coords, self.target = self.mols[idx], self.coords[idx], self.target[idx]\n\n # Split data\n spl1 = int(len(self.mols) * 0.2)\n spl2 = int(len(self.mols) * 0.1)\n\n self.x = {\"train\": self.mols[spl1:],\n \"valid\": self.mols[spl2:spl1],\n \"test\": self.mols[:spl2]}\n self.c = {\"train\": self.coords[spl1:],\n \"valid\": self.coords[spl2:spl1],\n \"test\": self.coords[:spl2]}\n self.y = {\"train\": self.target[spl1:],\n \"valid\": self.target[spl2:spl1],\n \"test\": self.target[:spl2]}\n\n def save_dataset(self, path, pred=None, target=\"test\", filename=None):\n mols = []\n for idx, (x, c, y) in enumerate(zip(self.x[target], self.c[target], self.y[target])):\n x.SetProp(\"true\", str(y * self.std + self.mean))\n if pred is not None:\n x.SetProp(\"pred\", str(pred[idx][0] * self.std + self.mean))\n mols.append(x)\n\n if filename is not None:\n w = Chem.SDWriter(path + filename + \".sdf\")\n else:\n w = Chem.SDWriter(path + target + \".sdf\")\n for mol in mols:\n if mol is not None:\n w.write(mol)\n\n def replace_dataset(self, path, subset=\"test\", target_name=\"target\"):\n x, c, y = [], [], []\n mols = Chem.SDMolSupplier(path)\n\n for mol in mols:\n if mol is not None:\n # Multitask\n if type(target_name) is list:\n y.append([float(mol.GetProp(t)) if t in mol.GetPropNames() else -1 for t in target_name])\n self.outputs = len(self.target_name)\n\n # Singletask\n elif target_name in mol.GetPropNames():\n _y = float(mol.GetProp(target_name))\n if _y == -1:\n continue\n else:\n y.append(_y)\n\n else:\n continue\n\n x.append(mol)\n c.append(mol.GetConformer().GetPositions())\n\n # Normalize\n x = np.array(x)\n c = np.array(c)\n y = (np.array(y) - self.mean) / self.std\n\n self.x[subset] = x\n self.c[subset] = c\n self.y[subset] = y.astype(int) if self.task != \"regression\" else y\n\n def set_features(self, use_atom_symbol=True, use_degree=True, use_hybridization=True, use_implicit_valence=True,\n use_partial_charge=False, use_formal_charge=True, use_ring_size=True, use_hydrogen_bonding=True,\n use_acid_base=True, use_aromaticity=True, use_chirality=True, use_num_hydrogen=True):\n\n self.use_atom_symbol = use_atom_symbol\n self.use_degree = use_degree\n self.use_hybridization = use_hybridization\n self.use_implicit_valence = use_implicit_valence\n self.use_partial_charge = use_partial_charge\n self.use_formal_charge = use_formal_charge\n self.use_ring_size = use_ring_size\n self.use_hydrogen_bonding = use_hydrogen_bonding\n self.use_acid_base = use_acid_base\n self.use_aromaticity = use_aromaticity\n self.use_chirality = use_chirality\n self.use_num_hydrogen = use_num_hydrogen\n\n # Calculate number of features\n mp = MPGenerator([], [], [], 1,\n use_atom_symbol=self.use_atom_symbol,\n use_degree=self.use_degree,\n use_hybridization=self.use_hybridization,\n use_implicit_valence=self.use_implicit_valence,\n use_partial_charge=self.use_partial_charge,\n use_formal_charge=self.use_formal_charge,\n use_ring_size=self.use_ring_size,\n use_hydrogen_bonding=self.use_hydrogen_bonding,\n use_acid_base=self.use_acid_base,\n use_aromaticity=self.use_aromaticity,\n use_chirality=self.use_chirality,\n use_num_hydrogen=self.use_num_hydrogen)\n self.num_features = mp.get_num_features()\n\n def 
generator(self, target, task=None):\n return MPGenerator(self.x[target], self.c[target], self.y[target], self.batch,\n task=task if task is not None else self.task,\n num_atoms=self.max_atoms,\n use_atom_symbol=self.use_atom_symbol,\n use_degree=self.use_degree,\n use_hybridization=self.use_hybridization,\n use_implicit_valence=self.use_implicit_valence,\n use_partial_charge=self.use_partial_charge,\n use_formal_charge=self.use_formal_charge,\n use_ring_size=self.use_ring_size,\n use_hydrogen_bonding=self.use_hydrogen_bonding,\n use_acid_base=self.use_acid_base,\n use_aromaticity=self.use_aromaticity,\n use_chirality=self.use_chirality,\n use_num_hydrogen=self.use_num_hydrogen)\n\n\nclass MPGenerator(Sequence):\n def __init__(self, x_set, c_set, y_set, batch, task=\"binary\", num_atoms=0,\n use_degree=True, use_hybridization=True, use_implicit_valence=True, use_partial_charge=False,\n use_formal_charge=True, use_ring_size=True, use_hydrogen_bonding=True, use_acid_base=True,\n use_aromaticity=True, use_chirality=True, use_num_hydrogen=True, use_atom_symbol=True):\n self.x, self.c, self.y = x_set, c_set, y_set\n\n self.batch = batch\n self.task = task\n self.num_atoms = num_atoms\n\n self.use_atom_symbol = use_atom_symbol\n self.use_degree = use_degree\n self.use_hybridization = use_hybridization\n self.use_implicit_valence = use_implicit_valence\n self.use_partial_charge = use_partial_charge\n self.use_formal_charge = use_formal_charge\n self.use_ring_size = use_ring_size\n self.use_hydrogen_bonding = use_hydrogen_bonding\n self.use_acid_base = use_acid_base\n self.use_aromaticity = use_aromaticity\n self.use_chirality = use_chirality\n self.use_num_hydrogen = use_num_hydrogen\n\n self.hydrogen_donor = Chem.MolFromSmarts(\"[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0]),n&H1&+0]\")\n self.hydrogen_acceptor = Chem.MolFromSmarts(\n \"[$([O,S;H1;v2;!$(*-*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),n&H0&+0,$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]\")\n self.acidic = Chem.MolFromSmarts(\"[$([C,S](=[O,S,P])-[O;H1,-1])]\")\n self.basic = Chem.MolFromSmarts(\n \"[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))]),$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))]),$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]\")\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch)))\n\n def __getitem__(self, idx):\n batch_x = self.x[idx * self.batch:(idx + 1) * self.batch]\n batch_c = self.c[idx * self.batch:(idx + 1) * self.batch]\n batch_y = self.y[idx * self.batch:(idx + 1) * self.batch]\n\n if self.task == \"category\":\n return self.tensorize(batch_x, batch_c), to_categorical(batch_y)\n elif self.task == \"binary\":\n return self.tensorize(batch_x, batch_c), np.array(batch_y, dtype=int)\n elif self.task == \"regression\":\n return self.tensorize(batch_x, batch_c), np.array(batch_y, dtype=float)\n elif self.task == \"input_only\":\n return self.tensorize(batch_x, batch_c)\n\n def tensorize(self, batch_x, batch_c):\n atom_tensor = np.zeros((len(batch_x), self.num_atoms, self.get_num_features()))\n adjm_tensor = np.zeros((len(batch_x), self.num_atoms, self.num_atoms))\n posn_tensor = np.zeros((len(batch_x), self.num_atoms, self.num_atoms, 3))\n\n for mol_idx, mol in enumerate(batch_x):\n Chem.RemoveHs(mol)\n mol_atoms = mol.GetNumAtoms()\n\n # Atom features\n atom_tensor[mol_idx, :mol_atoms, :] = self.get_atom_features(mol)\n\n # Adjacency matrix\n adjms = np.array(rdmolops.GetAdjacencyMatrix(mol), dtype=\"float\")\n\n # Normalize adjacency matrix by D^(-1/2) * A_hat * 
D^(-1/2), Kipf et al. 2016\n adjms += np.eye(mol_atoms)\n degree = np.array(adjms.sum(1))\n deg_inv_sqrt = np.power(degree, -0.5)\n deg_inv_sqrt[np.isinf(deg_inv_sqrt)] = 0.\n deg_inv_sqrt = np.diag(deg_inv_sqrt)\n\n adjms = np.matmul(np.matmul(deg_inv_sqrt, adjms), deg_inv_sqrt)\n\n adjm_tensor[mol_idx, : mol_atoms, : mol_atoms] = adjms\n\n # Relative position matrix\n for atom_idx in range(mol_atoms):\n pos_c = batch_c[mol_idx][atom_idx]\n\n for neighbor_idx in range(mol_atoms):\n pos_n = batch_c[mol_idx][neighbor_idx]\n\n # Direction should be Neighbor -> Center\n n_to_c = [pos_c[0] - pos_n[0], pos_c[1] - pos_n[1], pos_c[2] - pos_n[2]]\n posn_tensor[mol_idx, atom_idx, neighbor_idx, :] = n_to_c\n\n return [atom_tensor, adjm_tensor, posn_tensor]\n\n def get_num_features(self):\n mol = Chem.MolFromSmiles(\"CC\")\n return len(self.get_atom_features(mol)[0])\n\n def get_atom_features(self, mol):\n AllChem.ComputeGasteigerCharges(mol)\n Chem.AssignStereochemistry(mol)\n\n hydrogen_donor_match = sum(mol.GetSubstructMatches(self.hydrogen_donor), ())\n hydrogen_acceptor_match = sum(mol.GetSubstructMatches(self.hydrogen_acceptor), ())\n acidic_match = sum(mol.GetSubstructMatches(self.acidic), ())\n basic_match = sum(mol.GetSubstructMatches(self.basic), ())\n\n ring = mol.GetRingInfo()\n\n m = []\n for atom_idx in range(mol.GetNumAtoms()):\n atom = mol.GetAtomWithIdx(atom_idx)\n\n o = []\n o += one_hot(atom.GetSymbol(), ['C', 'O', 'N', 'S', 'Cl', 'F', 'Br', 'P',\n 'I', 'Si', 'B', 'Na', 'Sn', 'Se', 'other']) if self.use_atom_symbol else []\n o += one_hot(atom.GetDegree(), [0, 1, 2, 3, 4, 5, 6]) if self.use_degree else []\n o += one_hot(atom.GetHybridization(), [Chem.rdchem.HybridizationType.SP,\n Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3,\n Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2]) if self.use_hybridization else []\n o += one_hot(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) if self.use_implicit_valence else []\n o += one_hot(atom.GetFormalCharge(), [-3, -2, -1, 0, 1, 2, 3]) if self.use_degree else []\n # o += [atom.GetProp(\"_GasteigerCharge\")] if self.use_partial_charge else [] # some molecules return NaN\n o += [atom.GetIsAromatic()] if self.use_aromaticity else []\n o += [ring.IsAtomInRingOfSize(atom_idx, 3),\n ring.IsAtomInRingOfSize(atom_idx, 4),\n ring.IsAtomInRingOfSize(atom_idx, 5),\n ring.IsAtomInRingOfSize(atom_idx, 6),\n ring.IsAtomInRingOfSize(atom_idx, 7),\n ring.IsAtomInRingOfSize(atom_idx, 8)] if self.use_ring_size else []\n o += one_hot(atom.GetTotalNumHs(), [0, 1, 2, 3, 4]) if self.use_num_hydrogen else []\n\n if self.use_chirality:\n try:\n o += one_hot(atom.GetProp('_CIPCode'), [\"R\", \"S\"]) + [atom.HasProp(\"_ChiralityPossible\")]\n except:\n o += [False, False] + [atom.HasProp(\"_ChiralityPossible\")]\n if self.use_hydrogen_bonding:\n o += [atom_idx in hydrogen_donor_match]\n o += [atom_idx in hydrogen_acceptor_match]\n if self.use_acid_base:\n o += [atom_idx in acidic_match]\n o += [atom_idx in basic_match]\n\n m.append(o)\n\n return np.array(m, dtype=float)\n" ]
[ [ "numpy.diag", "numpy.power", "numpy.eye", "numpy.matmul", "numpy.std", "numpy.mean", "numpy.array", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MSanKeys963/gramex
[ "8ac5fd6e79d100982fdc9e9308d9a6250ce021e2" ]
[ "gramex/data.py" ]
[ "'''\nInteract with data from the browser\n'''\nimport io\nimport os\nimport re\nimport time\nimport json\nimport sqlalchemy as sa\nimport pandas as pd\nimport gramex.cache\nfrom tornado.escape import json_encode\nfrom gramex.config import merge, app_log\nfrom orderedattrdict import AttrDict\n\n_ENGINE_CACHE = {}\n_METADATA_CACHE = {}\n_FOLDER = os.path.dirname(os.path.abspath(__file__))\n# Dummy path used by _path_safe to detect sub-directories\n_path_safe_root = os.path.realpath('/root/dir')\n# Aggregator separator. ?col|SUM treats SUM as an aggregate function\n_agg_sep = '|'\n# List of aggregated types returned by operators (if different from column type)\n# Note: For aggregation functions, see:\n# SQLite: https://www.sqlite.org/lang_aggfunc.html\n# MySQL: https://dev.mysql.com/doc/refman/8.0/en/group-by-functions.html\n# PostgreSQL: https://www.postgresql.org/docs/9.5/static/functions-aggregate.html\n# SQL Server: http://bit.ly/2MYPgQi\n# DB2: https://ibm.co/2Kfbnjw\n# Oracle: https://docs.oracle.com/database/121/SQLRF/functions003.htm\n_agg_type = {\n 'sum': float,\n 'count': int,\n 'avg': float,\n 'stdev': float, # MS SQL version of stddev\n 'stddev': float,\n 'rank': int,\n 'percent_rank': float,\n # The following types are the same as the columns\n # first, last, min, max, median\n}\n# List of Python types returned by SQLAlchemy\n_numeric_types = {'int', 'long', 'float', 'Decimal'}\n# Data processing plugins.\n# e.g. plugins['mongodb'] = {'filter': fn, 'insert': fn, ...}\nplugins = {}\n\n\ndef _transform_fn(transform, transform_kwargs):\n if transform is not None and transform_kwargs is not None:\n return lambda v: transform(v, **transform_kwargs)\n return transform\n\n\ndef _replace(engine, args, *vars, **kwargs):\n escape = _sql_safe if engine == 'sqlalchemy' else _path_safe\n params = {k: v[0] for k, v in args.items() if len(v) > 0 and escape(v[0])}\n\n def _format(val):\n if isinstance(val, str):\n return val.format(**params)\n if isinstance(val, list):\n return [_format(v) for v in val]\n if isinstance(val, dict):\n return AttrDict([(k, _format(v)) for k, v in val.items()])\n return val\n\n return _format(list(vars)) + [_format(kwargs)]\n\n\ndef filter(url, args={}, meta={}, engine=None, ext=None, columns=None,\n query=None, queryfile=None, transform=None, transform_kwargs=None, **kwargs):\n '''\n Filters data using URL query parameters. Typical usage::\n\n filtered = gramex.data.filter(dataframe, args=handler.args)\n filtered = gramex.data.filter('file.csv', args=handler.args)\n filtered = gramex.data.filter('mysql://server/db', table='table', args=handler.args)\n\n It accepts the following parameters:\n\n :arg source url: Pandas DataFrame, sqlalchemy URL, directory or file name,\n http(s) data file, all `.format``-ed using ``args``.\n :arg dict args: URL query parameters as a dict of lists. Pass handler.args or parse_qs results\n :arg dict meta: this dict is updated with metadata during the course of filtering\n :arg str engine: over-rides the auto-detected engine. Can be 'dataframe', 'file',\n 'http', 'https', 'sqlalchemy', 'dir'\n :arg str ext: file extension (if url is a file). Defaults to url extension\n :arg dict columns: database column names to create if required (if url is a database).\n Keys are column names. Values can be SQL types, or dicts with these keys:\n - ``type`` (str), e.g. ``\"VARCHAR(10)\"``\n - ``default`` (str/int/float/bool), e.g. ``\"[email protected]\"``\n - ``nullable`` (bool), e.g. ``False``\n - ``primary_key`` (bool), e.g. 
``True`` -- used only when creating new tables\n - ``autoincrement`` (bool), e.g. ``True`` -- used only when creating new tables\n :arg str query: optional SQL query to execute (if url is a database),\n ``.format``-ed using ``args`` and supports SQLAlchemy SQL parameters.\n Loads entire result in memory before filtering.\n :arg str queryfile: optional SQL query file to execute (if url is a database).\n Same as specifying the ``query:`` in a file. Overrides ``query:``\n :arg function transform: optional in-memory transform of source data. Takes\n the result of gramex.cache.open or gramex.cache.query. Must return a\n DataFrame. Applied to both file and SQLAlchemy urls.\n :arg dict transform_kwargs: optional keyword arguments to be passed to the\n transform function -- apart from data\n :arg dict kwargs: Additional parameters are passed to\n :py:func:`gramex.cache.open`, ``sqlalchemy.create_engine`` or the plugin's filter\n :return: a filtered DataFrame\n\n Remaining kwargs are passed to :py:func:`gramex.cache.open` if ``url`` is a file, or\n ``sqlalchemy.create_engine`` if ``url`` is a SQLAlchemy URL. In particular:\n\n :arg str table: table name (if url is an SQLAlchemy URL), ``.format``-ed\n using ``args``.\n\n If this is used in a handler as::\n\n filtered = gramex.data.filter(dataframe, args=handler.args)\n\n ... then calling the handler with ``?x=1&y=2`` returns all rows in\n ``dataframe`` where x is 1 and y is 2.\n\n If a table or query is passed to an SQLAlchemy url, it is formatted using\n ``args``. For example::\n\n data = gramex.data.filter('mysql://server/db', table='{xxx}', args=handler.args)\n\n ... when passed ``?xxx=sales`` returns rows from the sales table. Similarly::\n\n data = gramex.data.filter('mysql://server/db', args=handler.args,\n query='SELECT {col}, COUNT(*) FROM table GROUP BY {col}')\n\n ... when passsed ``?col=City`` replaces ``{col}`` with ``City``.\n\n **NOTE**: To avoid SQL injection attacks, only values without spaces are\n allowed. So ``?col=City Name`` or ``?col=City+Name`` **will not** work.\n\n The URL supports operators filter like this:\n\n - ``?x`` selects x is not null\n - ``?x!`` selects x is null\n - ``?x=val`` selects x == val\n - ``?x!=val`` selects x != val\n - ``?x>=val`` selects x > val\n - ``?x>~=val`` selects x >= val\n - ``?x<=val`` selects x < val\n - ``?x<~=val`` selects x <= val\n - ``?x~=val`` selects x matches val as a regular expression\n - ``?x!~=val`` selects x does not match val as a regular expression\n\n Multiple filters are combined into an AND clause. Ranges can also be\n specified like this:\n\n - ``?x=a&y=b`` selects x = a AND y = b\n - ``?x>=100&x<=200`` selects x > 100 AND x < 200\n\n If the same column has multiple values, they are combined like this:\n\n - ``?x=a&x=b`` selects x IN (a, b)\n - ``?x!=a&x!=b`` selects x NOT IN (a, b)\n - ``?x~=a&x~=b`` selects x ~ a|b\n - ``?x>=a&x>=b`` selects x > MIN(a, b)\n - ``?x<=a&x<=b`` selects x < MAX(a, b)\n\n Arguments are converted to the type of the column before comparing. If this\n fails, it raises a ValueError.\n\n These URL query parameters control the output:\n\n - ``?_sort=col`` sorts column col in ascending order. ``?_sort=-col`` sorts\n in descending order.\n - ``?_limit=100`` limits the result to 100 rows\n - ``?_offset=100`` starts showing the result from row 100. Default: 0\n - ``?_c=x&_c=y`` returns only columns ``[x, y]``. 
``?_c=-col`` drops col.\n\n If a column name matches one of the above, you cannot filter by that column.\n Avoid column names beginning with _.\n\n To get additional information about the filtering, use::\n\n meta = {} # Create a variable which will be filled with more info\n filtered = gramex.data.filter(data, meta=meta, **handler.args)\n\n The ``meta`` variable is populated with the following keys:\n\n - ``filters``: Applied filters as ``[(col, op, val), ...]``\n - ``ignored``: Ignored filters as ``[(col, vals), ('_sort', col), ('_by', col), ...]``\n - ``excluded``: Excluded columns as ``[col, ...]``\n - ``sort``: Sorted columns as ``[(col, True), ...]``. The second parameter is ``ascending=``\n - ``offset``: Offset as integer. Defaults to 0\n - ``limit``: Limit as integer - ``None`` if limit is not applied\n - ``count``: Total number of rows, if available\n - ``by``: Group by columns as ``[col, ...]``\n - ``inserted``: List of (dict of primary values) for each inserted row\n\n These variables may be useful to show additional information about the\n filtered data.\n '''\n # Auto-detect engine.\n if engine is None:\n engine = get_engine(url)\n\n # Pass the meta= argument from kwargs (if any)\n meta.update({\n 'filters': [], # Applied filters as [(col, op, val), ...]\n 'ignored': [], # Ignored filters as [(col, vals), ...]\n 'sort': [], # Sorted columns as [(col, asc), ...]\n 'offset': 0, # Offset as integer\n 'limit': None, # Limit as integer - None if not applied\n 'by': [], # Group by columns as [col, ...]\n })\n controls = _pop_controls(args)\n transform = _transform_fn(transform, transform_kwargs)\n url, ext, query, queryfile, kwargs = _replace(\n engine, args, url, ext, query, queryfile, **kwargs)\n\n # Use the appropriate filter function based on the engine\n if engine == 'dataframe':\n data = transform(url) if callable(transform) else url\n return _filter_frame(data, meta=meta, controls=controls, args=args)\n elif engine == 'dir':\n data = dirstat(url, **args)\n data = transform(data) if callable(transform) else data\n return _filter_frame(data, meta=meta, controls=controls, args=args)\n elif engine in {'file', 'http', 'https'}:\n if engine == 'file' and not os.path.exists(url):\n raise OSError('url: %s not found' % url)\n # Get the full dataset. 
Then filter it\n data = gramex.cache.open(url, ext, transform=transform, **kwargs)\n return _filter_frame(data, meta=meta, controls=controls, args=args)\n elif engine.startswith('plugin+'):\n plugin = engine.split('+')[1]\n method = plugins[plugin]['filter']\n data = method(url=url, controls=controls, args=args, query=query, **kwargs)\n return _filter_frame(data, meta=meta, controls=controls, args=args)\n elif engine == 'sqlalchemy':\n table = kwargs.pop('table', None)\n state = kwargs.pop('state', None)\n engine = alter(url, table, columns, **kwargs)\n if query or queryfile:\n if queryfile:\n query = gramex.cache.open(queryfile, 'text')\n if not state:\n if isinstance(table, str):\n state = table if ' ' in table else [table]\n elif isinstance(table, (list, tuple)):\n state = [t for t in table]\n elif table is not None:\n raise ValueError('table: must be string or list of strings, not %r' % table)\n all_params = {k: v[0] for k, v in args.items() if len(v) > 0}\n data = gramex.cache.query(sa.text(query), engine, state, params=all_params)\n data = transform(data) if callable(transform) else data\n return _filter_frame(data, meta=meta, controls=controls, args=args)\n elif table:\n if callable(transform):\n data = gramex.cache.query(table, engine, [table])\n return _filter_frame(transform(data), meta=meta, controls=controls, args=args)\n else:\n return _filter_db(engine, table, meta=meta, controls=controls, args=args)\n else:\n raise ValueError('No table: or query: specified')\n else:\n raise ValueError('engine: %s invalid. Can be sqlalchemy|file|dataframe' % engine)\n\n\ndef delete(url, meta={}, args=None, engine=None, table=None, ext=None, id=None, columns=None,\n query=None, queryfile=None, transform=None, transform_kwargs={}, **kwargs):\n '''\n Deletes data using URL query parameters. 
Typical usage::\n\n count = gramex.data.delete(dataframe, args=handler.args, id=['id'])\n count = gramex.data.delete('file.csv', args=handler.args, id=['id'])\n count = gramex.data.delete('mysql://server/db', table='x', args=handler.args, id=['id'])\n\n ``id`` is a list of column names defining the primary key.\n Calling this in a handler with ``?id=1&id=2`` deletes rows with id is 1 or 2.\n\n It accepts the same parameters as :py:func:`filter`, and returns the number\n of deleted rows.\n '''\n if engine is None:\n engine = get_engine(url)\n meta.update({'filters': [], 'ignored': []})\n controls = _pop_controls(args)\n url, table, ext, query, queryfile, kwargs = _replace(\n engine, args, url, table, ext, query, queryfile, **kwargs)\n if engine == 'dataframe':\n data_filtered = _filter_frame(url, meta=meta, controls=controls,\n args=args, source='delete', id=id)\n return len(data_filtered)\n elif engine == 'file':\n data = gramex.cache.open(url, ext, transform=transform, **kwargs)\n data_filtered = _filter_frame(data, meta=meta, controls=controls,\n args=args, source='delete', id=id)\n gramex.cache.save(data, url, ext, index=False, **kwargs)\n return len(data_filtered)\n elif engine.startswith('plugin+'):\n plugin = engine.split('+')[1]\n method = plugins[plugin]['delete']\n return method(url=url, meta=meta, controls=controls, args=args, id=id, table=table,\n columns=columns, ext=ext, query=query, queryfile=queryfile, **kwargs)\n elif engine == 'sqlalchemy':\n if table is None:\n raise ValueError('No table: specified')\n engine = alter(url, table, columns, **kwargs)\n return _filter_db(engine, table, meta=meta, controls=controls, args=args,\n source='delete', id=id)\n else:\n raise ValueError('engine: %s invalid. Can be sqlalchemy|file|dataframe' % engine)\n\n\ndef update(url, meta={}, args=None, engine=None, table=None, ext=None, id=None, columns=None,\n query=None, queryfile=None, transform=None, transform_kwargs={}, **kwargs):\n '''\n Update data using URL query parameters. 
Typical usage::\n\n count = gramex.data.update(dataframe, args=handler.args, id=['id'])\n count = gramex.data.update('file.csv', args=handler.args, id=['id'])\n count = gramex.data.update('mysql://server/db', table='x', args=handler.args, id=['id'])\n\n ``id`` is a list of column names defining the primary key.\n Calling this in a handler with ``?id=1&x=2`` updates x=2 where id=1.\n\n It accepts the same parameters as :py:func:`filter`, and returns the number of updated rows.\n '''\n if engine is None:\n engine = get_engine(url)\n meta.update({'filters': [], 'ignored': []})\n controls = _pop_controls(args)\n url, table, ext, query, queryfile, kwargs = _replace(\n engine, args, url, table, ext, query, queryfile, **kwargs)\n if engine == 'dataframe':\n data_updated = _filter_frame(\n url, meta=meta, controls=controls, args=args, source='update', id=id)\n return len(data_updated)\n elif engine == 'file':\n data = gramex.cache.open(url, ext, transform=transform, **kwargs)\n data_updated = _filter_frame(\n data, meta=meta, controls=controls, args=args, source='update', id=id)\n gramex.cache.save(data, url, ext, index=False, **kwargs)\n return len(data_updated)\n elif engine.startswith('plugin+'):\n plugin = engine.split('+')[1]\n method = plugins[plugin]['update']\n return method(url=url, meta=meta, controls=controls, args=args, id=id, table=table,\n columns=columns, ext=ext, query=query, queryfile=queryfile, **kwargs)\n elif engine == 'sqlalchemy':\n if table is None:\n raise ValueError('No table: specified')\n engine = alter(url, table, columns, **kwargs)\n return _filter_db(engine, table, meta=meta, controls=controls, args=args,\n source='update', id=id)\n else:\n raise ValueError('engine: %s invalid. Can be sqlalchemy|file|dataframe' % engine)\n\n\ndef insert(url, meta={}, args=None, engine=None, table=None, ext=None, id=None, columns=None,\n query=None, queryfile=None, transform=None, transform_kwargs={}, **kwargs):\n '''\n Insert data using URL query parameters. Typical usage::\n\n count = gramex.data.insert(dataframe, args=handler.args, id=['id'])\n count = gramex.data.insert('file.csv', args=handler.args, id=['id'])\n count = gramex.data.insert('mysql://server/db', table='x', args=handler.args, id=['id'])\n\n ``id`` is a list of column names defining the primary key.\n Calling this in a handler with ``?id=3&x=2`` inserts a new record with id=3 and x=2.\n\n If the target file / table does not exist, it is created.\n\n It accepts the same parameters as :py:func:`filter`, and returns the number of updated rows.\n '''\n if engine is None:\n engine = get_engine(url)\n _pop_controls(args)\n if not args:\n raise ValueError('No args: specified')\n meta.update({'filters': [], 'ignored': [], 'inserted': []})\n # If values do not have equal number of elements, pad them and warn\n rowcount = max(len(val) for val in args.values())\n for key, val in args.items():\n rows = len(val)\n if 0 < rows < rowcount:\n val += [val[-1]] * (rowcount - rows)\n app_log.warning('data.insert: column %s has %d rows not %d. 
Extended last value %s',\n key, rows, rowcount, val[-1])\n rows = pd.DataFrame.from_dict(args)\n url, table, ext, query, queryfile, kwargs = _replace(\n engine, args, url, table, ext, query, queryfile, **kwargs)\n if engine == 'dataframe':\n rows = _pop_columns(rows, url.columns, meta['ignored'])\n url = url.append(rows, sort=False)\n return len(rows)\n elif engine == 'file':\n try:\n data = gramex.cache.open(url, ext, transform=None, **kwargs)\n except (OSError, IOError):\n data = rows\n else:\n rows = _pop_columns(rows, data.columns, meta['ignored'])\n data = data.append(rows, sort=False)\n gramex.cache.save(data, url, ext, index=False, **kwargs)\n return len(rows)\n elif engine.startswith('plugin+'):\n plugin = engine.split('+')[1]\n method = plugins[plugin]['insert']\n return method(url=url, rows=rows, meta=meta, args=args, table=table, **kwargs)\n elif engine == 'sqlalchemy':\n if table is None:\n raise ValueError('No table: specified')\n engine = alter(url, table, columns, **kwargs)\n try:\n cols = get_table(engine, table).columns\n except sa.exc.NoSuchTableError:\n pass\n else:\n rows = _pop_columns(rows, [col.name for col in cols], meta['ignored'])\n if '.' in table:\n kwargs['schema'], table = table.rsplit('.', 1)\n # If the DB doesn't yet have the table, create it WITH THE PRIMARY KEYS.\n # Note: pandas does not document engine.dialect.has_table so it might change.\n if not engine.dialect.has_table(engine, table) and id:\n engine.execute(pd.io.sql.get_schema(rows, name=table, keys=id, con=engine))\n\n def insert_method(tbl, conn, keys, data_iter):\n '''Pandas .to_sql() doesn't return inserted row primary keys. Capture it in meta'''\n data = [dict(zip(keys, row)) for row in data_iter]\n # If the ?id= is not provided, Pandas creates a schema based on available columns,\n # without the `id` column. SQLAlchemy won't return inserted_primary_key unless the\n # metadata has a primary key. So, hoping that the table already has a primary key,\n # load table from DB via extend_existing=True.\n sa_table = sa.Table(table, tbl.table.metadata,\n extend_existing=True, autoload_with=engine)\n r = conn.execute(sa_table.insert(), data)\n # SQLAlchemy 1.4+ supports inserted_primary_key_rows, but is in beta (Nov 2020).\n # ids = getattr(r, 'inserted_primary_key_rows', [])\n # If we have SQLAlchemy 1.3, only single inserts have an inserted_primary_key.\n ids = [r.inserted_primary_key] if hasattr(r, 'inserted_primary_key') else []\n # Add non-empty IDs as a dict with associated keys\n id_cols = [col.name for col in sa_table.primary_key]\n for row in ids:\n if row:\n meta['inserted'].append(dict(zip(id_cols, row)))\n\n kwargs['method'] = insert_method\n # If user passes ?col= with empty value, replace with NULL. If the column is an INT/FLOAT,\n # type conversion int('') / float('') will fail.\n rows.replace('', None, inplace=True)\n pd.io.sql.to_sql(rows, table, engine, if_exists='append', index=False, **kwargs)\n return len(rows)\n else:\n raise ValueError('engine: %s invalid. Can be sqlalchemy|file|dataframe' % engine)\n\n\ndef get_engine(url):\n '''\n Used to detect type of url passed. 
Returns:\n\n - ``'dataframe'`` if url is a Pandas DataFrame\n - ``'sqlalchemy'`` if url is a sqlalchemy compatible URL\n - ``'plugin'`` if it is `<valid-plugin-name>://...`\n - ``protocol`` if url is of the form `protocol://...`\n - ``'dir'`` if it is not a URL but a valid directory\n - ``'file'`` if it is not a URL but a valid file\n\n Else it raises an Exception\n '''\n if isinstance(url, pd.DataFrame):\n return 'dataframe'\n for plugin_name in plugins:\n if url.startswith(f'{plugin_name}:'):\n return f'plugin+{plugin_name}'\n try:\n url = sa.engine.url.make_url(url)\n except sa.exc.ArgumentError:\n return 'dir' if os.path.isdir(url) else 'file'\n try:\n url.get_driver_name()\n return 'sqlalchemy'\n except sa.exc.NoSuchModuleError:\n return url.drivername\n\n\ndef create_engine(url, create=sa.create_engine, **kwargs):\n '''\n Cached version of sqlalchemy.create_engine (or any custom engine).\n\n Normally, this is not required. But :py:func:`get_table` caches the engine\n *and* metadata *and* uses autoload=True. This makes sqlalchemy create a new\n database connection for every engine object, and not dispose it. So we\n re-use the engine objects within this module.\n '''\n if url not in _ENGINE_CACHE:\n _ENGINE_CACHE[url] = create(url, **kwargs)\n return _ENGINE_CACHE[url]\n\n\ndef get_table(engine, table, **kwargs):\n '''Return the sqlalchemy table from the engine and table name'''\n if engine not in _METADATA_CACHE:\n _METADATA_CACHE[engine] = sa.MetaData(bind=engine)\n metadata = _METADATA_CACHE[engine]\n if '.' in table:\n kwargs['schema'], table = table.rsplit('.', 1)\n return sa.Table(table, metadata, autoload=True, autoload_with=engine, **kwargs)\n\n\ndef _pop_controls(args):\n '''Filter out data controls: _sort, _limit, _offset, _c (column) and _by from args'''\n return {\n key: args.pop(key)\n for key in ('_sort', '_limit', '_offset', '_c', '_by')\n if key in args\n }\n\n\ndef _pop_columns(data, cols, ignored):\n '''Remove columns not in cols'''\n cols = set(cols)\n for col in data.columns:\n if col not in cols:\n ignored.append([col, data[col].tolist()])\n return data[[col for col in cols if col in data.columns]]\n\n\ndef _sql_safe(val):\n '''Return True if val is safe for insertion in an SQL query'''\n if isinstance(val, str):\n return not re.search(r'\\s', val)\n elif isinstance(val, (int, float, bool)):\n return True\n return False\n\n\ndef _path_safe(path):\n '''Returns True if path does not try to escape outside a given directory using .. or / etc'''\n # Ignore non-strings. These are generally not meant for paths\n if not isinstance(path, str):\n return True\n return os.path.realpath(os.path.join(_path_safe_root, path)).startswith(_path_safe_root)\n\n\n# The order of operators is important. ~ is at the end. 
Otherwise, !~\n# or >~ will also be mapped to ~ as an operator\noperators = ['!', '>', '>~', '<', '<~', '!~', '~']\n\n\ndef _filter_col(col, cols):\n '''\n Parses a column name from a list of columns and returns a (col, agg, op)\n tuple.\n\n - ``col`` is the name of the column in cols.\n - ``agg`` is the aggregation operation (SUM, MIN, MAX, etc), else None\n - ``op`` is the operator ('', !, >, <, etc)\n\n If the column is invalid, then ``col`` and ``op`` are None\n '''\n colset = set(cols)\n # ?col= is returned quickly\n if col in colset:\n return col, None, ''\n # Check if it matches a non-empty operator, like ?col>~=\n for op in operators:\n if col.endswith(op):\n name = col[:-len(op)]\n if name in colset:\n return name, None, op\n # If there's an aggregator, split it out, like ?col|SUM>~=\n elif _agg_sep in name:\n name, agg = name.rsplit(_agg_sep, 1)\n if name in colset:\n return name, agg, op\n # If no operators match, it might be a pure aggregation, like ?col|SUM=\n if _agg_sep in col:\n name, agg = col.rsplit(_agg_sep, 1)\n if name in colset:\n return name, agg, ''\n # Otherwise we don't know what it is\n return None, None, None\n\n\ndef _filter_frame_col(data, key, col, op, vals, meta):\n # Apply type conversion for values\n conv = data[col].dtype.type\n vals = tuple(conv(val) for val in vals if val)\n if op not in {'', '!'} and len(vals) == 0:\n meta['ignored'].append((key, vals))\n elif op == '':\n data = data[data[col].isin(vals)] if len(vals) else data[pd.notnull(data[col])]\n elif op == '!':\n data = data[~data[col].isin(vals)] if len(vals) else data[pd.isnull(data[col])]\n elif op == '>':\n data = data[data[col] > min(vals)]\n elif op == '>~':\n data = data[data[col] >= min(vals)]\n elif op == '<':\n data = data[data[col] < max(vals)]\n elif op == '<~':\n data = data[data[col] <= max(vals)]\n elif op == '!~':\n data = data[~data[col].str.contains('|'.join(vals))]\n elif op == '~':\n data = data[data[col].str.contains('|'.join(vals))]\n meta['filters'].append((col, op, vals))\n return data\n\n\ndef _filter_db_col(query, method, key, col, op, vals, column, conv, meta):\n '''\n - Updates ``query`` with a method (WHERE/HAVING) that sets '<key> <op> <vals>'\n - ``column`` is the underlying ColumnElement\n - ``conv`` is a type conversion function that converts ``vals`` to the correct type\n - Updates ``meta`` with the fields used for filtering (or ignored)\n '''\n # In PY2, .python_type returns str. We want unicode\n sql_types = {bytes: str, pd.datetime: str}\n conv = sql_types.get(conv, conv)\n vals = tuple(conv(val) for val in vals if val)\n if op not in {'', '!'} and len(vals) == 0:\n meta['ignored'].append((key, vals))\n elif op == '':\n # Test if column is not NULL. != None is NOT the same as is not None\n query = method(column.in_(vals) if len(vals) else column != None) # noqa\n elif op == '!':\n # Test if column is NULL. 
== None is NOT the same as is None\n query = method(column.notin_(vals) if len(vals) else column == None) # noqa\n elif op == '>':\n query = method(column > min(vals))\n elif op == '>~':\n query = method(column >= min(vals))\n elif op == '<':\n query = method(column < max(vals))\n elif op == '<~':\n query = method(column <= max(vals))\n elif op == '!~':\n query = method(column.notlike('%' + '%'.join(vals) + '%'))\n elif op == '~':\n query = method(column.like('%' + '%'.join(vals) + '%'))\n meta['filters'].append((col, op, vals))\n return query\n\n\ndef _filter_sort_columns(sort_filter, cols):\n sorts, ignore_sorts = [], []\n for col in sort_filter:\n if col in cols:\n sorts.append((col, True))\n elif col.startswith('-') and col[1:] in cols:\n sorts.append((col[1:], False))\n else:\n ignore_sorts.append(col)\n return sorts, ignore_sorts\n\n\ndef _filter_select_columns(col_filter, cols, meta):\n '''\n Checks ?_c=col&_c=-col for filter(). Takes values of ?_c= as col_filter and\n data column names as cols. Returns 2 lists: show_cols as columns to show.\n ignored_cols has column names not in the list, i.e. the ?_c= parameters that\n are ignored.\n '''\n selected_cols, excluded_cols, ignored_cols = [], set(), []\n for col in col_filter:\n if col in cols:\n selected_cols.append(col)\n elif col.startswith('-') and col[1:] in cols:\n excluded_cols.add(col[1:])\n else:\n ignored_cols.append(col)\n if len(excluded_cols) > 0 and len(selected_cols) == 0:\n selected_cols = cols\n show_cols = [col for col in selected_cols if col not in excluded_cols]\n meta['excluded'] = list(excluded_cols)\n return show_cols, ignored_cols\n\n\ndef _filter_groupby_columns(by, cols, meta):\n '''\n Checks ?_by=col&_by=col for filter().\n\n - ``by``: list of column names to group by\n - ``cols``: list of valid column names\n - ``meta``: meta['by'] and meta['ignored'] are updated\n\n Returns a list of columns to group by\n '''\n colset = set(cols)\n for col in by:\n if col in colset:\n meta['by'].append(col)\n else:\n meta['ignored'].append(('_by', col))\n return meta['by']\n\n\n# If ?by=col|avg is provided, this works in SQL but not in Pandas DataFrames.\n# Convert into a DataFrame friendly function\n_frame_functions = {\n 'avg': 'mean',\n 'average': 'mean',\n}\n\n\ndef _filter_frame(data, meta, controls, args, source='select', id=[]):\n '''\n If ``source`` is ``'select'``, returns a DataFrame in which the DataFrame\n ``data`` is filtered using ``args``. Additional controls like _sort, etc are\n in ``controls``. Metadata is stored in ``meta``.\n\n If ``source`` is ``'update'``, filters using ``args`` but only for columns\n mentioned in ``id``. Resulting DataFrame is updated with remaining ``args``.\n Returns the updated rows.\n\n If ``source`` is ``'delete'``, filters using ``args`` but only for columns\n mentioned in ``id``. Deletes these rows. 
Returns the deleted rows.\n\n :arg data: dataframe\n :arg meta: dictionary of `filters`, `ignored`, `sort`, `offset`, `limit` params from kwargs\n :arg args: user arguments to filter the data\n :arg source: accepted values - `update`, `delete` for PUT, DELETE methods in FormHandler\n :arg id: list of id specific to data using which values can be updated\n '''\n original_data = data\n cols_for_update = {}\n cols_having = []\n for key, vals in args.items():\n # check if `key`` is in the `id` list -- ONLY when data is updated\n if (source in ('update', 'delete') and key in id) or (source == 'select'):\n # Parse column names, ignoring missing / unmatched columns\n col, agg, op = _filter_col(key, data.columns)\n if col is None:\n meta['ignored'].append((key, vals))\n continue\n # Process aggregated columns AFTER filtering, not before (like HAVING clause)\n # e.g. ?sales|SUM=<val> should be applied only after the column is created\n if agg is not None:\n cols_having.append((key, col + _agg_sep + agg, op, vals))\n continue\n # Apply filters\n data = _filter_frame_col(data, key, col, op, vals, meta)\n elif source == 'update':\n # Update values should only contain 1 value. 2nd onwards are ignored\n if key not in data.columns or len(vals) == 0:\n meta['ignored'].append((key, vals))\n else:\n cols_for_update[key] = vals[0]\n if len(vals) > 1:\n meta['ignored'].append((key, vals[1:]))\n else:\n meta['ignored'].append((key, vals))\n meta['count'] = len(data)\n if source == 'delete':\n original_data.drop(data.index, inplace=True)\n return data\n elif source == 'update':\n conv = {k: v.type for k, v in data.dtypes.items()}\n for key, val in cols_for_update.items():\n original_data.loc[data.index, key] = conv[key](val)\n return data\n else:\n # Apply controls\n if '_by' in controls:\n by = _filter_groupby_columns(controls['_by'], data.columns, meta)\n # If ?_c is not specified, use 'col|sum' for all numeric columns\n # TODO: This does not support ?_c=-<col> to hide a column\n col_list = controls.get('_c', None)\n if col_list is None:\n col_list = [col + _agg_sep + 'sum' for col in data.columns # noqa\n if pd.api.types.is_numeric_dtype(data[col])]\n agg_cols = []\n agg_dict = AttrDict()\n for key in col_list:\n col, agg, val = _filter_col(key, data.columns)\n if agg is not None:\n # Convert aggregation into a Pandas GroupBy agg function\n agg = agg.lower()\n agg = _frame_functions.get(agg, agg)\n agg_cols.append(key)\n if col in agg_dict:\n agg_dict[col].append(agg)\n else:\n agg_dict[col] = [agg]\n if len(by) > 0:\n if not agg_cols:\n # If no aggregation columns exist, just show groupby columns.\n data = data.groupby(by).agg('size').reset_index()\n data = data.iloc[:, [0]]\n else:\n data = data.groupby(by).agg(agg_dict)\n data.columns = agg_cols\n data = data.reset_index()\n # Apply HAVING operators\n for key, col, op, vals in cols_having:\n data = _filter_frame_col(data, key, col, op, vals, meta)\n else:\n row = [data[col].agg(op) for col, ops in agg_dict.items() for op in ops]\n data = pd.DataFrame([row], columns=agg_cols)\n elif '_c' in controls:\n show_cols, hide_cols = _filter_select_columns(controls['_c'], data.columns, meta)\n data = data[show_cols]\n if len(hide_cols) > 0:\n meta['ignored'].append(('_c', hide_cols))\n if '_sort' in controls:\n meta['sort'], ignore_sorts = _filter_sort_columns(controls['_sort'], data.columns)\n if len(meta['sort']) > 0:\n data = data.sort_values(by=[c[0] for c in meta['sort']],\n ascending=[c[1] for c in meta['sort']])\n if len(ignore_sorts) > 0:\n 
meta['ignored'].append(('_sort', ignore_sorts))\n if '_offset' in controls:\n try:\n offset = min(int(v) for v in controls['_offset'])\n except ValueError:\n raise ValueError('_offset not integer: %r' % controls['_offset'])\n data = data.iloc[offset:]\n meta['offset'] = offset\n if '_limit' in controls:\n try:\n limit = min(int(v) for v in controls['_limit'])\n except ValueError:\n raise ValueError('_limit not integer: %r' % controls['_limit'])\n data = data.iloc[:limit]\n meta['limit'] = limit\n return data\n\n\ndef _filter_db(engine, table, meta, controls, args, source='select', id=[]):\n '''\n\n It accepts the following parameters\n\n :arg sqlalchemy engine engine: constructed sqlalchemy string\n :arg database table table: table name in the mentioned database\n :arg controls: dictionary of `_sort`, `_c`, `_offset`, `_limit` params\n :arg meta: dictionary of `filters`, `ignored`, `sort`, `offset`, `limit` params from kwargs\n :arg args: dictionary of user arguments to filter the data\n :arg source: accepted values - `update`, `delete` for PUT, DELETE methods in FormHandler\n :arg id: list of keys specific to data using which values can be updated\n '''\n table = get_table(engine, table)\n cols = table.columns\n colslist = cols.keys()\n\n if source == 'delete':\n query = sa.delete(table)\n elif source == 'update':\n query = sa.update(table)\n else:\n query = sa.select([table])\n cols_for_update = {}\n cols_having = []\n for key, vals in args.items():\n # check if `key`` is in the `id` list -- ONLY when data is updated\n if (source in ('update', 'delete') and key in id) or (source == 'select'):\n # Parse column names, ignoring missing / unmatched columns\n col, agg, op = _filter_col(key, colslist)\n if col is None:\n meta['ignored'].append((key, vals))\n continue\n # Process aggregated columns AFTER filtering, not before (like HAVING clause)\n # e.g. ?sales|SUM=<val> should be applied only after the column is created\n if agg is not None:\n cols_having.append((key, col + _agg_sep + agg, op, vals))\n continue\n # Apply filters\n query = _filter_db_col(query, query.where, key, col, op, vals,\n cols[col], cols[col].type.python_type, meta)\n elif source == 'update':\n # Update values should only contain 1 value. 
2nd onwards are ignored\n if key not in cols or len(vals) == 0:\n meta['ignored'].append((key, vals))\n else:\n cols_for_update[key] = vals[0]\n if len(vals) > 1:\n meta['ignored'].append((key, vals[1:]))\n else:\n meta['ignored'].append((key, vals))\n if source == 'delete':\n res = engine.execute(query)\n return res.rowcount\n elif source == 'update':\n query = query.values(cols_for_update)\n res = engine.execute(query)\n return res.rowcount\n else:\n # Apply controls\n if '_by' in controls:\n by = _filter_groupby_columns(controls['_by'], colslist, meta)\n query = query.group_by(*by)\n # If ?_c is not specified, use 'col|sum' for all numeric columns\n # TODO: This does not support ?_c=-<col> to hide a column\n col_list = controls.get('_c', None)\n if col_list is None:\n col_list = [col + _agg_sep + 'sum' for col, column in cols.items() # noqa\n if column.type.python_type.__name__ in _numeric_types]\n agg_cols = AttrDict([(col, cols[col]) for col in by]) # {label: ColumnElement}\n typ = {} # {label: python type}\n for key in col_list:\n col, agg, val = _filter_col(key, colslist)\n if agg is not None:\n # Convert aggregation into SQLAlchemy query\n agg = agg.lower()\n typ[key] = _agg_type.get(agg, cols[col].type.python_type)\n agg_func = getattr(sa.sql.expression.func, agg)\n agg_cols[key] = agg_func(cols[col]).label(key)\n if not agg_cols:\n return pd.DataFrame()\n query = query.with_only_columns(agg_cols.values())\n # Apply HAVING operators\n for key, col, op, vals in cols_having:\n query = _filter_db_col(query, query.having, key, col, op, vals,\n agg_cols[col], typ[col], meta)\n elif '_c' in controls:\n show_cols, hide_cols = _filter_select_columns(controls['_c'], colslist, meta)\n query = query.with_only_columns([cols[col] for col in show_cols])\n if len(hide_cols) > 0:\n meta['ignored'].append(('_c', hide_cols))\n if len(show_cols) == 0:\n return pd.DataFrame()\n if '_sort' in controls:\n meta['sort'], ignore_sorts = _filter_sort_columns(\n controls['_sort'], colslist + query.columns.keys())\n for col, asc in meta['sort']:\n orderby = sa.asc if asc else sa.desc\n query = query.order_by(orderby(col))\n if len(ignore_sorts) > 0:\n meta['ignored'].append(('_sort', ignore_sorts))\n if '_offset' in controls:\n try:\n offset = min(int(v) for v in controls['_offset'])\n except ValueError:\n raise ValueError('_offset not integer: %r' % controls['_offset'])\n query = query.offset(offset)\n meta['offset'] = offset\n if '_limit' in controls:\n try:\n limit = min(int(v) for v in controls['_limit'])\n except ValueError:\n raise ValueError('_limit not integer: %r' % controls['_limit'])\n query = query.limit(limit)\n meta['limit'] = limit\n return pd.read_sql(query, engine)\n\n\n_VEGA_SCRIPT = os.path.join(_FOLDER, 'download.vega.js')\n\n\ndef download(data, format='json', template=None, args={}, **kwargs):\n '''\n Download a DataFrame or dict of DataFrames in various formats. This is used\n by :py:class:`gramex.handlers.FormHandler`. You are **strongly** advised to\n try it before creating your own FunctionHandler.\n\n Usage as a FunctionHandler::\n\n def download_as_csv(handler):\n handler.set_header('Content-Type', 'text/csv')\n handler.set_header('Content-Disposition', 'attachment;filename=data.csv')\n return gramex.data.download(dataframe, format='csv')\n\n It takes the following arguments:\n\n :arg dataset data: A DataFrame or a dict of DataFrames\n :arg str format: Output format. 
Can be ``csv|json|html|xlsx|template``\n :arg file template: Path to template file for ``template`` format\n :arg dict args: dictionary of user arguments to subsitute spec\n :arg dict kwargs: Additional parameters that are passed to the relevant renderer\n :return: bytes with the download file contents\n\n When ``data`` is a DataFrame, this is what different ``format=`` parameters\n return:\n\n - ``csv`` returns a UTF-8-BOM encoded CSV file of the dataframe\n - ``xlsx`` returns an Excel file with 1 sheet named ``data``. kwargs are\n passed to ``.to_excel(index=False)``\n - ``html`` returns a HTML file with a single table. kwargs are passed to\n ``.to_html(index=False)``\n - ``json`` returns a JSON file. kwargs are passed to\n ``.to_json(orient='records', force_ascii=True)``.\n - ``template`` returns a Tornado template rendered file. The template\n receives ``data`` as ``data`` and any additional kwargs.\n - ``pptx`` returns a PPTX generated by pptgen\n - ``seaborn`` or ``sns`` returns a Seaborn generated chart\n - ``vega`` returns JavaScript that renders a Vega chart\n\n When ``data`` is a dict of DataFrames, the following additionally happens:\n\n - ``format='csv'`` renders all DataFrames one below the other, adding the\n key as heading\n - ``format='xlsx'`` renders each DataFrame on a sheet whose name is the key\n - ``format='html'`` renders tables below one another with the key as heading\n - ``format='json'`` renders as a dict of DataFrame JSONs\n - ``format='template'`` sends ``data`` and all ``kwargs`` as passed to the\n template\n - ``format='pptx'`` passes ``data`` as a dict of datasets to pptgen\n - ``format='vega'`` passes ``data`` as a dict of datasets to Vega\n\n You need to set the MIME types on the handler yourself. Recommended MIME\n types are in gramex.yaml under handler.FormHandler.\n '''\n if isinstance(data, dict):\n for key, val in data.items():\n if not isinstance(val, pd.DataFrame):\n raise ValueError('download({\"%s\": %r}) invalid type' % (key, type(val)))\n if not len(data):\n raise ValueError('download() data requires at least 1 DataFrame')\n multiple = True\n elif not isinstance(data, pd.DataFrame):\n raise ValueError('download(%r) invalid type' % type(data))\n else:\n data = {'data': data}\n multiple = False\n\n def kw(**conf):\n return merge(kwargs, conf, mode='setdefault')\n\n if format == 'csv':\n # csv.writer requires BytesIO in PY2 and StringIO in PY3.\n # I can't see an elegant way out of this other than writing code for each.\n out = io.StringIO()\n kw(index=False)\n for index, (key, val) in enumerate(data.items()):\n if index > 0:\n out.write('\\n')\n if multiple:\n out.write(key + '\\n')\n val.to_csv(out, **kwargs)\n result = out.getvalue()\n # utf-8-sig encoding returns the result with a UTF-8 BOM. 
Easier to open in Excel\n return result.encode('utf-8-sig') if result.strip() else result.encode('utf-8')\n elif format == 'template':\n return gramex.cache.open(template, 'template').generate(\n data=data if multiple else data['data'], **kwargs)\n elif format == 'html':\n out = io.StringIO()\n kw(index=False)\n for key, val in data.items():\n if multiple:\n out.write('<h1>%s</h1>' % key)\n val.to_html(out, **kwargs)\n return out.getvalue().encode('utf-8')\n elif format in {'xlsx', 'xls'}:\n out = io.BytesIO()\n kw(index=False)\n # TODO: Create and use a FrameWriter for formatting\n with pd.ExcelWriter(out, engine='xlsxwriter') as writer:\n for key, val in data.items():\n val.to_excel(writer, sheet_name=key, **kwargs)\n return out.getvalue()\n elif format in {'pptx', 'ppt'}:\n from gramex.pptgen import pptgen # noqa\n out = io.BytesIO()\n pptgen(target=out, data=data, **kwargs)\n return out.getvalue()\n elif format in {'seaborn', 'sns'}:\n kw = AttrDict()\n defaults = {'chart': 'barplot', 'ext': 'png', 'data': 'data', 'dpi': 96,\n 'width': 640, 'height': 480}\n for key, default in defaults.items():\n kw[key] = kwargs.pop(key, default)\n import matplotlib\n matplotlib.use('Agg') # Before importing seaborn, set a headless backend\n import seaborn as sns\n plot = getattr(sns, kw.chart)(data=data.get(kw.data), **kwargs)\n out = io.BytesIO()\n fig = plot.figure if hasattr(plot, 'figure') else plot.fig\n for k in ['dpi', 'width', 'height']:\n kw[k] = float(kw[k])\n fig.set_size_inches(kw.width / kw.dpi, kw.height / kw.dpi)\n fig.savefig(out, format=kw.ext, dpi=kw.dpi)\n fig.clear()\n return out.getvalue()\n elif format in {'vega', 'vega-lite', 'vegam'}:\n kwargs = kw(orient='records', force_ascii=True)\n spec = kwargs.pop('spec', {})\n kwargs.pop('handler', None)\n out = io.BytesIO()\n # conf = {..., spec: {..., data: __DATA__}}\n if isinstance(spec.get('data'), (dict, list)) or 'fromjson' in spec:\n # support only one dataset\n values = list(data.values())\n out.write(values[0].to_json(**kwargs).encode('utf-8'))\n out = out.getvalue()\n else:\n spec['data'] = '__DATA__'\n for index, (key, val) in enumerate(data.items()):\n out.write(b',{\"name\":' if index > 0 else b'{\"name\":')\n out.write(json_encode(key).encode('utf-8'))\n out.write(b',\"values\":')\n out.write(val.to_json(**kwargs).encode('utf-8'))\n out.write(b'}')\n out = out.getvalue()\n if format == 'vega':\n out = b'[' + out + b']'\n kwargs['spec'], _ = _replace('', args, spec)\n conf = json.dumps(kwargs, ensure_ascii=True, separators=(',', ':'), indent=None)\n conf = conf.encode('utf-8').replace(b'\"__DATA__\"', out)\n script = gramex.cache.open(_VEGA_SCRIPT, 'bin')\n return script.replace(b'/*{conf}*/', conf)\n else:\n out = io.BytesIO()\n kwargs = kw(orient='records', force_ascii=True)\n if multiple:\n out.write(b'{')\n for index, (key, val) in enumerate(data.items()):\n if index > 0:\n out.write(b',')\n out.write(json_encode(key).encode('utf-8'))\n out.write(b':')\n out.write(val.to_json(**kwargs).encode('utf-8'))\n out.write(b'}')\n else:\n out.write(data['data'].to_json(**kwargs).encode('utf-8'))\n return out.getvalue()\n\n\ndef dirstat(url, timeout=10, **kwargs):\n '''\n Return a DataFrame with the list of all files & directories under the url.\n\n It accepts the following parameters:\n\n :arg str url: path to a directory, or a URL like ``dir:///c:/path/``,\n ``dir:////root/dir/``. Raises ``OSError`` if url points to a missing\n location or is not a directory.\n :arg int timeout: max seconds to wait. ``None`` to wait forever. 
(default: 10)\n :return: a DataFrame with columns:\n - ``type``: extension with a ``.`` prefix -- or ``dir``\n - ``dir``: directory path to the file relative to the URL\n - ``name``: file name (including extension)\n - ``path``: full path to file or dir. This equals url / dir / name\n - ``size``: file size\n - ``mtime``: last modified time in seconds since epoch\n - ``level``: path depth (i.e. the number of paths in dir)\n '''\n try:\n url = sa.engine.url.make_url(url)\n target = url.database\n except sa.exc.ArgumentError:\n target = url\n if not os.path.isdir(target):\n raise OSError('dirstat: %s is not a directory' % target)\n target = os.path.normpath(target)\n result = []\n start_time = time.time()\n for dirpath, dirnames, filenames in os.walk(target):\n if timeout and time.time() - start_time > timeout:\n app_log.debug('dirstat: %s timeout (%.1fs)', url, timeout)\n break\n for name in dirnames:\n path = os.path.join(dirpath, name)\n stat = os.stat(path)\n dirname = dirpath.replace(target, '').replace(os.sep, '/') + '/'\n result.append({\n 'path': path, 'dir': dirname, 'name': name, 'type': 'dir',\n 'size': stat.st_size, 'mtime': stat.st_mtime, 'level': dirname.count('/'),\n })\n for name in filenames:\n path = os.path.join(dirpath, name)\n stat = os.stat(path)\n dirname = dirpath.replace(target, '').replace(os.sep, '/') + '/'\n result.append({\n 'path': path, 'dir': dirname, 'name': name, 'type': os.path.splitext(name)[-1],\n 'size': stat.st_size, 'mtime': stat.st_mtime, 'level': dirname.count('/'),\n })\n return pd.DataFrame(result)\n\n\ndef filtercols(url, args={}, meta={}, engine=None, ext=None,\n query=None, queryfile=None, transform=None, transform_kwargs={}, **kwargs):\n '''\n Filter data and extract unique values of each column using URL query parameters.\n Typical usage::\n\n filtered = gramex.data.filtercols(dataframe, args=handler.args)\n filtered = gramex.data.filtercols('file.csv', args=handler.args)\n filtered = gramex.data.filtercols('mysql://server/db', table='table', args=handler.args)\n\n It accepts the following parameters:\n\n :arg source url: Pandas DataFrame, sqlalchemy URL, directory or file name,\n `.format``-ed using ``args``.\n :arg dict args: URL query parameters as a dict of lists. Pass handler.args or parse_qs results\n :arg dict meta: this dict is updated with metadata during the course of filtering\n :arg str engine: over-rides the auto-detected engine. Can be 'dataframe', 'file',\n 'http', 'https', 'sqlalchemy', 'dir'\n :arg str ext: file extension (if url is a file). Defaults to url extension\n :arg str query: optional SQL query to execute (if url is a database),\n ``.format``-ed using ``args`` and supports SQLAlchemy SQL parameters.\n Loads entire result in memory before filtering.\n :arg str queryfile: optional SQL query file to execute (if url is a database).\n Same as specifying the ``query:`` in a file. Overrides ``query:``\n :arg function transform: optional in-memory transform of source data. Takes\n the result of gramex.cache.open or gramex.cache.query. Must return a\n DataFrame. 
Applied to both file and SQLAlchemy urls.\n :arg dict transform_kwargs: optional keyword arguments to be passed to the\n transform function -- apart from data\n :arg dict kwargs: Additional parameters are passed to\n :py:func:`gramex.cache.open` or ``sqlalchemy.create_engine``\n :return: a filtered DataFrame\n\n Remaining kwargs are passed to :py:func:`gramex.cache.open` if ``url`` is a file, or\n ``sqlalchemy.create_engine`` if ``url`` is a SQLAlchemy URL.\n\n If this is used in a handler as::\n\n filtered = gramex.data.filtercols(dataframe, args=handler.args)\n\n ... then calling the handler with ``?_c=state&_c=district`` returns all unique values\n in columns of ``dataframe`` where columns are state and district.\n\n Column filter supports like this:\n\n - ``?_c=y&x`` returns df with unique values of y where x is not null\n - ``?_c=y&x=val`` returns df with unique values of y where x == val\n - ``?_c=y&y=val`` returns df with unique values of y, ignores filter y == val\n - ``?_c=y&x>=val`` returns df with unique values of y where x > val\n - ``?_c=x&_c=y&x=val`` returns df with unique values of x ignoring filter x == val\n and returns unique values of y where x == val\n\n Arguments are converted to the type of the column before comparing. If this\n fails, it raises a ValueError.\n\n These URL query parameters control the output:\n\n - ``?_sort=col`` sorts column col in ascending order. ``?_sort=-col`` sorts\n in descending order.\n - ``?_limit=100`` limits the result to 100 rows\n - ``?_offset=100`` starts showing the result from row 100. Default: 0\n - ``?_c=x&_c=y`` returns only columns ``[x, y]``. ``?_c=-col`` drops col.\n\n If a column name matches one of the above, you cannot filter by that column.\n Avoid column names beginning with _.\n\n To get additional information about the filtering, use::\n\n meta = {} # Create a variable which will be filled with more info\n filtered = gramex.data.filter(data, meta=meta, **handler.args)\n\n The ``meta`` variable is populated with the following keys:\n\n - ``filters``: Applied filters as ``[(col, op, val), ...]``\n - ``ignored``: Ignored filters as ``[(col, vals), ('_sort', cols), ...]``\n - ``excluded``: Excluded columns as ``[col, ...]``\n - ``sort``: Sorted columns as ``[(col, True), ...]``. The second parameter is ``ascending=``\n - ``offset``: Offset as integer. 
Defaults to 0\n - ``limit``: Limit as integer - ``100`` if limit is not applied\n - ``count``: Total number of rows, if available\n\n These variables may be useful to show additional information about the\n filtered data.\n '''\n # Auto-detect engine.\n if engine is None:\n engine = get_engine(url)\n result = {}\n limit = args.get('_limit', [100])\n try:\n limit = min(int(v) for v in limit)\n except ValueError:\n raise ValueError('_limit not integer: %r' % limit)\n for col in args.get('_c', []):\n # col_args takes _sort, _c and all filters from args\n col_args = {}\n for key, value in args.items():\n if key in ['_sort']:\n col_args[key] = value\n # Ignore any filters on the column we are currently processing\n if not key.startswith('_') and key != col:\n col_args[key] = value\n col_args['_by'] = [col]\n col_args['_c'] = []\n col_args['_limit'] = [limit]\n result[col] = gramex.data.filter(url, args=col_args, **kwargs)\n return result\n\n\ndef alter(url: str, table: str, columns: dict = None, **kwargs):\n '''\n Create or alter a table with columns specified::\n\n gramex.data.alter(url, table, columns={\n 'id': {'type': 'int', 'primary_key': True, 'autoincrement': True},\n 'email': {'nullable': True, 'default': 'none'},\n 'age': {'type': 'float', 'nullable': False, 'default': 18},\n })\n\n It accepts the following parameters:\n\n :arg str url: sqlalchemy URL\n :arg str table: table name\n :arg dict columns: column names, with values are SQL types, or dicts with keys:\n - ``type`` (str), e.g. ``\"VARCHAR(10)\"``\n - ``default`` (str/int/float/bool), e.g. ``\"[email protected]\"``\n - ``nullable`` (bool), e.g. ``False``\n - ``primary_key`` (bool), e.g. ``True`` -- used only when creating new tables\n - ``autoincrement`` (bool), e.g. ``True`` -- used only when creating new tables\n :return: sqlalchemy engine\n\n Other kwargs are passed to ``sqlalchemy.create_engine()``.\n\n If the table exists, any new columns are added. Existing columns are unchanged.\n\n If the table does not exist, the table is created with the specified columns.\n\n Note: ``primary_key`` and ``autoincrement`` don't work on existing tables because:\n - SQLite disallows PRIMARY KEY with ALTER. https://stackoverflow.com/a/1120030/100904\n - AUTO_INCREMENT doesn't work without PRIMARY KEY in MySQL\n '''\n engine = create_engine(url, **kwargs)\n if columns is None:\n return engine\n try:\n db_table = get_table(engine, table)\n except sa.exc.NoSuchTableError:\n # If the table's not in the DB, create it\n cols = []\n for name, row in columns.items():\n row = dict({'type': row} if isinstance(row, str) else row, name=name)\n col_type = row.get('type', 'text')\n if isinstance(col_type, str):\n # Use eval() to handle direct types like INTEGER *and* expressions like VARCHAR(3)\n # eval() is safe here since `col_type` is written by app developer\n row['type'] = eval(col_type.upper(), vars(sa.types)) # nosec: frozen input\n row['type_'] = row.pop('type')\n if 'default' in row:\n row['server_default'] = str(row.pop('default'))\n cols.append(sa.Column(**row))\n sa.Table(table, _METADATA_CACHE[engine], *cols).create(engine)\n else:\n quote = engine.dialect.identifier_preparer.quote_identifier\n # If the table's already in the DB, add new columns. 
We can't change column types\n with engine.connect() as conn:\n with conn.begin():\n for name, row in columns.items():\n if name in db_table.columns:\n continue\n row = {'type': row} if isinstance(row, str) else row\n col_type = row.get('type', 'text')\n constraints = []\n if 'nullable' in row:\n constraints.append('' if row['nullable'] else 'NOT NULL')\n if 'default' in row:\n # repr() converts int, float properly,\n # str into 'str' with single quotes (which is the MySQL standard)\n # TODO: datetime and other types will fail\n constraints += ['DEFAULT', repr(row['default'])]\n # This syntax works on DB2, MySQL, Oracle, PostgreSQL, SQLite\n conn.execute(\n f'ALTER TABLE {quote(table)} '\n f'ADD COLUMN {quote(name)} {col_type} {\" \".join(constraints)}')\n # Refresh table metadata after altering\n get_table(engine, table, extend_existing=True)\n return engine\n\n\n# NoSQL Operations\n# ----------------------------------------\n\ndef _type_conversion(param_list, operations=None):\n try:\n converted = []\n if operations == '<' or operations == '<=':\n converted = min(float(v) for v in param_list)\n if operations == '>' or operations == '>=':\n converted = max(float(v) for v in param_list)\n return converted\n except ValueError:\n raise ValueError('Value is not integer: %r' % param_list)\n\n\n_mongodb_op_map = {\n '<': '$lt',\n '<~': '$lte',\n '>': '$gt',\n '>~': '$gte',\n '': '$in',\n '!': '$nin'\n}\n\n\ndef _filter_mongodb_col(col, op, vals, meta_cols):\n if op in ['', '!']:\n return {col: {_mongodb_op_map[op]: vals}}\n elif op == '!~':\n return {col: {\"$not\": {\"$regex\": '|'.join(vals), \"$options\": 'i'}}}\n elif op == '~':\n return {col: {\"$regex\": '|'.join(vals), \"$options\": 'i'}}\n elif col and op in _mongodb_op_map.keys():\n # TODO: Improve the numpy to Python type\n convert = int if (meta_cols[col].dtype == pd.np.int64) else meta_cols[col].dtype.type\n return {col: {_mongodb_op_map[op]: convert(val)} for val in vals}\n\n\ndef _mongodb_query(args, meta_cols):\n # Convert a query like x>=3&x>=4&x>=5 into\n # {\"$or\": [{x: {$gt: 3}}, {x: {$gt: 4}}, {x: $gt: 5}]}\n # TODO: ?_id= is not working\n conditions = []\n for key, vals in args.items():\n col, agg, op = _filter_col(key, meta_cols)\n if col:\n conditions.append(_filter_mongodb_col(col, op, vals, meta_cols))\n # TODO: add meta['ignored']\n return {'$and': conditions} if len(conditions) > 1 else conditions[0] if conditions else {}\n\n\ndef _controls_default(table, query=None, controls=None, meta_cols=None):\n '''Get the controls like, _c, _sort, _offset, _limit'''\n\n if '_c' in controls:\n _projection = dict()\n # _projection = {'field': 1, 'field1': -1}\n for c in controls['_c']:\n _projection[c] = 1\n cursor = table.find(query, _projection)\n else:\n cursor = table.find(query)\n\n if '_sort' in controls:\n sort, ignore_sorts = _filter_sort_columns(controls['_sort'], meta_cols)\n _sort = {key: (+1 if val else -1) for key, val in dict(sort).items()}\n\n # sort, [('field1', 1), ('field2', -1)]\n cursor = cursor.sort(list(_sort.items()))\n\n if '_offset' in controls:\n cursor = cursor.skip(int(controls['_offset'][0]))\n\n if '_limit' in controls:\n cursor = cursor.limit(int(controls['_limit'][0]))\n\n return cursor\n\n\ndef _mongodb_collection(url, database, collection, **kwargs):\n import pymongo\n\n # Create MongoClient\n create_kwargs = {key: val for key, val in kwargs.items() if key in\n {'port', 'document_class', 'tz_aware', 'connect'}}\n client = create_engine(url, create=pymongo.MongoClient, **create_kwargs)\n db = 
client[database]\n return db[collection]\n\n\ndef _mongodb_json(obj):\n '''Parse val in keys ending with . as JSON ({\"key.\": val}), but retain other keys'''\n result = {}\n for key, val in obj.items():\n if key.endswith('.'):\n result[key[:-1]] = json.loads(val)\n else:\n result[key] = val\n return result\n\n\ndef _filter_mongodb(url, controls, args, database=None, collection=None, query=None, **kwargs):\n '''TODO: Document function and usage'''\n table = _mongodb_collection(url, database, collection, **kwargs)\n if query is None:\n meta_cols = pd.DataFrame(list(table.find().limit(100)))\n query = _mongodb_query(args, meta_cols)\n cursor = _controls_default(table, query=query, controls=controls, meta_cols=meta_cols)\n data = pd.DataFrame(list(cursor))\n # Convert Object IDs into strings to allow JSON conversion\n if len(data) > 0:\n import bson\n for col, val in data.iloc[0].iteritems():\n if type(val) in {bson.objectid.ObjectId}:\n data[col] = data[col].map(str)\n return data\n\n\ndef _delete_mongodb(url, controls, args, meta=None, database=None, collection=None, query=None,\n **kwargs):\n table = _mongodb_collection(url, database, collection, **kwargs)\n meta_cols = pd.DataFrame(list(table.find().limit(100)))\n query = _mongodb_query(args, meta_cols)\n result = table.delete_many(query)\n return result.deleted_count\n\n\ndef _update_mongodb(url, controls, args, meta=None, database=None, collection=None, query=None,\n id=[], **kwargs):\n table = _mongodb_collection(url, database, collection, **kwargs)\n query = _mongodb_query(args, id)\n values = {key: val[0] for key, val in args.items()}\n result = table.update_many(query, {'$set': _mongodb_json(values)})\n return result.modified_count\n\n\ndef _insert_mongodb(url, rows, meta=None, database=None, collection=None, **kwargs):\n table = _mongodb_collection(url, database, collection, **kwargs)\n result = table.insert_many([_mongodb_json(row) for row in rows.to_dict(orient='records')])\n meta['inserted'] = [{'id': str(id) for id in result.inserted_ids}]\n return len(result.inserted_ids)\n\n\n# add test case for inserting nested value ?parent.={child:value}\n# curl --globoff -I -X POST 'http://127.0.0.1:9988/?x.={\"2\":3}&y.={\"true\":true}&Name=abcd'\n# add test case for updating nested value ?parent.child.={key:value}\n# curl --globoff -I -X PUT 'http://127.0.0.1:9988/?x.2=4&y.true.=[2,3]&Name=abcd'\n# add test case for nested document query ?parent.child=value\nplugins['mongodb'] = {\n 'filter': _filter_mongodb,\n 'delete': _delete_mongodb,\n 'insert': _insert_mongodb,\n 'update': _update_mongodb,\n}\n" ]
[ [ "pandas.io.sql.get_schema", "pandas.notnull", "pandas.isnull", "matplotlib.use", "pandas.DataFrame", "pandas.api.types.is_numeric_dtype", "pandas.DataFrame.from_dict", "pandas.ExcelWriter", "pandas.io.sql.to_sql", "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
hchoi405/dppm
[ "0ce2ed2313277c243508f1cffbaa37a9644c19c5" ]
[ "scripts/exr.py" ]
[ "from __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom builtins import *\nimport OpenEXR, Imath\nimport numpy as np\nimport os, sys\nfrom collections import defaultdict\n#import set\n\n# exr.py: Tools/helpers for various exr I/O operations\n\nFLOAT = Imath.PixelType(Imath.PixelType.FLOAT)\nHALF = Imath.PixelType(Imath.PixelType.HALF)\nUINT = Imath.PixelType(Imath.PixelType.UINT)\n\nNO_COMPRESSION = Imath.Compression(Imath.Compression.NO_COMPRESSION)\nRLE_COMPRESSION = Imath.Compression(Imath.Compression.RLE_COMPRESSION)\nZIPS_COMPRESSION = Imath.Compression(Imath.Compression.ZIPS_COMPRESSION)\nZIP_COMPRESSION = Imath.Compression(Imath.Compression.ZIP_COMPRESSION)\nPIZ_COMPRESSION = Imath.Compression(Imath.Compression.PIZ_COMPRESSION)\nPXR24_COMPRESSION = Imath.Compression(Imath.Compression.PXR24_COMPRESSION)\n\nNP_PRECISION = {\n \"FLOAT\": np.float32,\n \"HALF\": np.float16,\n \"UINT\": np.uint8\n}\n\n\ndef open(filename):\n # Check if the file is an EXR file\n if not OpenEXR.isOpenExrFile(filename):\n raise Exception(\"File '%s' is not an EXR file.\" % filename)\n # Return an `InputFile`\n return InputFile(OpenEXR.InputFile(filename), filename)\n\n\ndef read(filename, channels = \"default\", precision = FLOAT):\n f = open(filename)\n if _is_list(channels):\n # Construct an array of precisions\n return f.get_dict(channels, precision=precision)\n\n else:\n return f.get(channels, precision)\n\ndef read_all(filename, precision = FLOAT):\n f = open(filename)\n return f.get_all(precision=precision)\n\ndef write(filename, data, channel_names = None, precision = FLOAT, compression = PIZ_COMPRESSION):\n\n # Helper function add a third dimension to 2-dimensional matrices (single channel)\n def make_ndims_3(matrix):\n if matrix.ndim > 3 or matrix.ndim < 2:\n raise Exception(\"Invalid number of dimensions for the `matrix` argument.\")\n elif matrix.ndim == 2:\n matrix = np.expand_dims(matrix, -1)\n return matrix\n\n # Helper function to read channel names from default\n def get_channel_names(channel_names, depth):\n if channel_names:\n if depth is not len(channel_names):\n raise Exception(\"The provided channel names have the wrong length (%d vs %d).\" % (len(channel_names), depth))\n return channel_names\n elif depth in _default_channel_names:\n return _default_channel_names[depth]\n else:\n raise Exception(\"There are no suitable default channel names for data of depth %d\" % depth)\n\n #\n # Case 1, the `data` argument is a dictionary\n #\n if isinstance(data, dict):\n # Make sure everything has ndims 3\n for group, matrix in data.items():\n data[group] = make_ndims_3(matrix)\n\n # Prepare precisions\n if not isinstance(precision, dict):\n precisions = {group: precision for group in data.keys()}\n else:\n precisions = {group: precision.get(group, FLOAT) for group in data.keys()}\n\n # Prepare channel names\n if channel_names is None:\n channel_names = {}\n channel_names = {group: get_channel_names(channel_names.get(group), matrix.shape[2]) for group, matrix in data.items()}\n\n # Collect channels\n channels = {}\n channel_data = {}\n width = None\n height = None\n for group, matrix in data.items():\n # Read the depth of the current group\n # and set height and width variables if not set yet\n if width is None:\n height, width, depth = matrix.shape\n else:\n depth = matrix.shape[2]\n names = channel_names[group]\n # Check the number of channel names\n if len(names) != depth:\n raise Exception(\"Depth does not match the number of channel names for channel 
'%s'\" % group)\n for i, c in enumerate(names):\n if group == \"default\":\n channel_name = c\n else:\n channel_name = \"%s.%s\" % (group, c)\n channels[channel_name] = Imath.Channel(precisions[group])\n channel_data[channel_name] = matrix[:,:,i].astype(NP_PRECISION[str(precisions[group])]).tostring()\n\n # Save\n header = OpenEXR.Header(width, height)\n header['compression'] = compression\n header['channels'] = channels\n out = OpenEXR.OutputFile(filename, header)\n out.writePixels(channel_data)\n\n #\n # Case 2, the `data` argument is one matrix\n #\n elif isinstance(data, np.ndarray):\n data = make_ndims_3(data)\n height, width, depth = data.shape\n channel_names = get_channel_names(channel_names, depth)\n header = OpenEXR.Header(width, height)\n header['compression'] = compression\n header['channels'] = {c: Imath.Channel(precision) for c in channel_names}\n out = OpenEXR.OutputFile(filename, header)\n out.writePixels({c: data[:,:,i].astype(NP_PRECISION[str(precision)]).tostring() for i, c in enumerate(channel_names)})\n\n else:\n raise Exception(\"Invalid precision for the `data` argument. Supported are NumPy arrays and dictionaries.\")\n\n\ndef tonemap(matrix, gamma=2.2):\n return np.clip(matrix ** (1.0/gamma), 0, 1)\n\n\nclass InputFile(object):\n\n def __init__(self, input_file, filename=None):\n self.input_file = input_file\n\n if not input_file.isComplete():\n raise Exception(\"EXR file '%s' is not ready.\" % filename)\n\n header = input_file.header()\n dw = header['dataWindow']\n\n self.width = dw.max.x - dw.min.x + 1\n self.height = dw.max.y - dw.min.y + 1\n self.channels = sorted(header['channels'].keys(),key=_channel_sort_key)\n self.depth = len(self.channels)\n self.precisions = [c.type for c in header['channels'].values()]\n self.channel_precision = {c: v.type for c, v in header['channels'].items()}\n self.channel_map = defaultdict(list)\n self.root_channels = set()\n self._init_channel_map()\n\n def _init_channel_map(self):\n # Make a dictionary of subchannels per channel\n for c in self.channels:\n self.channel_map['all'].append(c)\n parts = c.split('.')\n if len(parts) == 1:\n self.root_channels.add('default')\n self.channel_map['default'].append(c)\n else:\n self.root_channels.add(parts[0])\n for i in range(1, len(parts)+1):\n key = \".\".join(parts[0:i])\n self.channel_map[key].append(c)\n\n def describe_channels(self):\n if 'default' in self.root_channels:\n for c in self.channel_map['default']:\n print (c)\n for group in sorted(list(self.root_channels)):\n if group != 'default':\n channels = self.channel_map[group]\n print(\"%-20s%s\" % (group, \",\".join([c[len(group)+1:] for c in channels])))\n\n def get(self, group = 'default', precision=FLOAT):\n channels = self.channel_map[group]\n\n if len(channels) == 0:\n print(\"I did't find any channels in group '%s'.\" % group)\n print(\"You could try:\")\n self.describe_channels()\n sys.exit()\n\n strings = self.input_file.channels(channels)\n\n matrix = np.zeros((self.height, self.width, len(channels)), dtype=NP_PRECISION[str(precision)])\n for i, string in enumerate(strings):\n precision = NP_PRECISION[str(self.channel_precision[channels[i]])]\n matrix[:,:,i] = np.fromstring(string, dtype = precision) \\\n .reshape(self.height, self.width)\n return matrix\n\n def get_all(self, precision = {}):\n return self.get_dict(self.root_channels, precision)\n\n def get_dict(self, groups = [], precision = {}):\n\n if not isinstance(precision, dict):\n precision = {group: precision for group in groups}\n\n return_dict = {}\n todo = 
[]\n for group in groups:\n group_chans = self.channel_map[group]\n if len(group_chans) == 0:\n print(\"I didn't find any channels for the requested group '%s'.\" % group)\n print(\"You could try:\")\n self.describe_channels()\n sys.exit()\n if group in precision:\n p = precision[group]\n else:\n p = FLOAT\n matrix = np.zeros((self.height, self.width, len(group_chans)), dtype=NP_PRECISION[str(p)])\n return_dict[group] = matrix\n for i, c in enumerate(group_chans):\n todo.append({'group': group, 'id': i, 'channel': c})\n\n if len(todo) == 0:\n print(\"Please ask for some channels, I cannot process empty queries.\")\n print(\"You could try:\")\n self.describe_channels()\n sys.exit()\n\n strings = self.input_file.channels([c['channel'] for c in todo])\n\n for i, item in enumerate(todo):\n precision = NP_PRECISION[str(self.channel_precision[todo[i]['channel']])]\n return_dict[item['group']][:,:,item['id']] = \\\n np.fromstring(strings[i], dtype = precision) \\\n .reshape(self.height, self.width)\n return return_dict\n\n\ndef _sort_dictionary(key):\n if key == 'R' or key == 'r':\n return 10\n elif key == 'G' or key == 'g':\n return 20\n elif key == 'B' or key == 'b':\n return 30\n elif key == 'A' or key == 'a':\n return 40\n elif key == 'X' or key == 'x':\n return 110\n elif key == 'Y' or key == 'y':\n return 120\n elif key == 'Z' or key == 'z':\n return 130\n else:\n return key\n\n\ndef _channel_sort_key(i):\n return [_sort_dictionary(x) for x in i.split(\".\")]\n\n\n_default_channel_names = {\n 1: ['Z'],\n 2: ['X','Y'],\n 3: ['R','G','B'],\n 4: ['R','G','B','A']\n}\n\n\ndef _is_list(x):\n return isinstance(x, (list, tuple, np.ndarray))\n\n\n\n\n\n\n\n" ]
[ [ "numpy.fromstring", "numpy.expand_dims", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HS-YN/charades-webcam
[ "67e9ee4e5b79250bc525d63e0d3bf901ab24367f" ]
[ "conversion/pytorch-tf.py" ]
[ "# @leonidk\n\nfrom __future__ import print_function\n\nimport numpy as np \nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\n\nimport torchvision.models as models\nimport torchvision\n\n\ndef load_partial_state(model, state_dict):\n # @chenyuntc\n sd = model.state_dict()\n for k, v in state_dict.items():\n k = k.replace('module.', '')\n if k not in sd or not sd[k].shape == v.shape:\n print('ignoring state key for loading: {}'.format(k))\n continue\n if isinstance(v, torch.nn.Parameter):\n v = v.data\n sd[k].copy_(v)\n\n\ndef change_output(model,nclass):\n if hasattr(model, 'classifier'):\n newcls = list(model.classifier.children())\n found = False\n for i, cls in reversed(list(enumerate(newcls))):\n if hasattr(cls, 'in_features'):\n newcls = newcls[:i] + [nn.Linear(cls.in_features, nclass)] + newcls[i + 1:]\n model.classifier = nn.Sequential(*newcls)\n found = True\n break\n if hasattr(cls, 'in_channels'):\n kwargs = {'kernel_size': (1, 1), 'stride': (1, 1)}\n newcls = newcls[:i] + [nn.Conv2d(cls.in_channels, nclass, **kwargs)] + newcls[i + 1:]\n model.classifier = nn.Sequential(*newcls)\n if hasattr(model, 'num_classes'):\n model.num_classes = nclass\n found = True\n break\n assert found\n\n\nmodel = models.squeezenet1_1(pretrained=True)\nstate = torch.load('/nfs.yoda/gsigurds/ai2/caches/rgbnet_squeezenet1/model_best.pth.tar')['state_dict']\nchange_output(model, 157) \nload_partial_state(model, state)\ndestination_py = 'squeezenet.py'\n\ntype_lookups = {}\noutfp = open(destination_py,'w')\noutfp.write('import tensorflow as tf\\n\\n')\nout_s = ''\ndef conv2d(c,**kwargs):\n padding = 'VALID' if c.padding[0] is 0 else 'SAME'\n filters = c.out_channels\n size = c.kernel_size\n parameters = [p for p in c.parameters()]\n W = parameters[0].data.numpy()\n if len(parameters) > 1:\n b = parameters[1].data.numpy()\n\n W = np.transpose(W,[2,3,1,0])\n\n wi = tf.constant_initializer(W)\n if len(parameters) > 1:\n bi = tf.constant_initializer(b)\n Wt = tf.get_variable('weights',shape=W.shape,initializer=wi)#,\n if 'print' not in kwargs or kwargs['print'] == True:\n outfp.write(out_s + 'W = tf.get_variable(\"weights\",shape=[{},{},{},{}])\\n'.format(*list(W.shape)))\n\n if len(parameters) > 1:\n bt = tf.get_variable('bias',shape=b.shape,initializer=bi)#,\n if 'print' not in kwargs or kwargs['print'] == True:\n outfp.write(out_s + 'b = tf.get_variable(\"bias\",shape=[{}])\\n'.format(b.shape[0]))\n x = tf.nn.conv2d(kwargs['inp'],Wt,[1,c.stride[0],c.stride[1],1],padding)\n if 'print' not in kwargs or kwargs['print'] == True:\n outfp.write(out_s + 'x = tf.nn.conv2d(x,W,[1,{},{},1],\"{}\")\\n'.format(c.stride[0],c.stride[1],padding))\n if len(parameters) > 1:\n x = tf.nn.bias_add(x,bt)\n if 'print' not in kwargs or kwargs['print'] == True:\n outfp.write(out_s + 'x = tf.nn.bias_add(x,b)\\n')\n\n return x\n\ndef relu(c,**kwargs):\n outfp.write(out_s + \"x = tf.nn.relu(x)\\n\")\n return tf.nn.relu(kwargs['inp'])\ndef max_pool(c,**kwargs):\n padding = 'VALID' if c.padding is 0 else 'SAME'\n outfp.write(out_s + \"x = tf.nn.max_pool(x,[1,{0},{0},1],strides=[1,{1},{1},1],padding='{2}')\\n\".format(\n c.kernel_size,c.stride,padding))\n x = tf.nn.max_pool(kwargs['inp'],[1,c.kernel_size,c.kernel_size,1],strides=[1,c.stride,c.stride,1],padding=padding)\n return x\ndef avg_pool(c,**kwargs):\n padding = 'VALID' if c.padding is 0 else 'SAME'\n outfp.write(out_s + \"x = tf.nn.avg_pool(x,[1,{0},{0},1],strides=[1,{1},{1},1],padding='{2}')\\n\".format(\n c.kernel_size,c.stride,padding))\n x = 
tf.nn.avg_pool(kwargs['inp'],[1,c.kernel_size,c.kernel_size,1],strides=[1,c.stride,c.stride,1],padding=padding)\n return x\ndef dropout(c,**kwargs):\n outfp.write(out_s + 'x = x\\n')\n return kwargs['inp']\ndef fire_module(c,**kwargs):\n global out_s\n\n # couldn't figure out how to\n # automatically unravel it\n outfp.write(out_s + \"x = fire_module(x,{0},{1},{2},{3})\\n\".format(\n c.squeeze.in_channels,c.squeeze.out_channels,c.expand1x1.out_channels,c.expand3x3.out_channels))\n with tf.variable_scope(\"fire\"):\n with tf.variable_scope(\"squeeze\"):\n s = conv2d(c.squeeze,inp=kwargs['inp'],print=False)\n s = tf.nn.relu(s)\n with tf.variable_scope(\"e11\"):\n e11 = conv2d(c.expand1x1,inp=s,print=False)\n e11 = tf.nn.relu(e11)\n with tf.variable_scope(\"e33\"):\n e33 = conv2d(c.expand3x3,inp=s,print=False)\n e33 = tf.nn.relu(e33)\n x = tf.concat([e11,e33],3)\n return x\n\ndef seq_container(c,**kwargs):\n global out_s\n x = kwargs['inp']\n for c2 in enumerate(c.children()):\n c2_class = c2[1].__class__\n if c2_class in type_lookups:\n outfp.write(out_s + \"with tf.variable_scope('{}'):\\n\".format('layer' + str(c2[0])))\n with tf.variable_scope('layer' + str(c2[0])):\n out_s = out_s + ' '\n x = type_lookups[c2_class](c2[1],inp = x)\n name = kwargs['name'] if 'name' in kwargs else ''\n outfp.write(out_s + \"self.layers.append(x)\\n\".format(name + str(c2[0])))\n\n out_s = out_s[:-4]\n else:\n unknown_class(c2[1])\n print(c2_class)\n return x\ndef batch_norm(c,**kwargs):\n print('batch_norm')\n return kwargs['inp']\ntype_lookups[torch.nn.modules.conv.Conv2d] = conv2d\ntype_lookups[torch.nn.modules.activation.ReLU] = relu\ntype_lookups[torch.nn.modules.container.Sequential] = seq_container\ntype_lookups[torch.nn.modules.pooling.MaxPool2d] = max_pool\ntype_lookups[torch.nn.modules.pooling.AvgPool2d] = avg_pool\ntype_lookups[torch.nn.modules.dropout.Dropout] = dropout\ntype_lookups[torchvision.models.squeezenet.Fire] = fire_module\ntype_lookups[torch.nn.modules.batchnorm.BatchNorm2d] = batch_norm\ntf.reset_default_graph()\ninput_image = tf.placeholder('float',shape=[None,None,None,3],name='input_image')\n\nif True:\n outfp.write('def fire_module(x,inp,sp,e11p,e33p):\\n')\n outfp.write(' with tf.variable_scope(\"fire\"):\\n')\n outfp.write(' with tf.variable_scope(\"squeeze\"):\\n')\n outfp.write(' W = tf.get_variable(\"weights\",shape=[1,1,inp,sp])\\n')\n outfp.write(' b = tf.get_variable(\"bias\",shape=[sp])\\n')\n outfp.write(' s = tf.nn.conv2d(x,W,[1,1,1,1],\"VALID\")+b\\n')\n outfp.write(' s = tf.nn.relu(s)\\n')\n outfp.write(' with tf.variable_scope(\"e11\"):\\n')\n outfp.write(' W = tf.get_variable(\"weights\",shape=[1,1,sp,e11p])\\n')\n outfp.write(' b = tf.get_variable(\"bias\",shape=[e11p])\\n')\n outfp.write(' e11 = tf.nn.conv2d(s,W,[1,1,1,1],\"VALID\")+b\\n')\n outfp.write(' e11 = tf.nn.relu(e11)\\n')\n outfp.write(' with tf.variable_scope(\"e33\"):\\n')\n outfp.write(' W = tf.get_variable(\"weights\",shape=[3,3,sp,e33p])\\n')\n outfp.write(' b = tf.get_variable(\"bias\",shape=[e33p])\\n')\n outfp.write(' e33 = tf.nn.conv2d(s,W,[1,1,1,1],\"SAME\")+b\\n')\n outfp.write(' e33 = tf.nn.relu(e33)\\n')\n outfp.write(' return tf.concat([e11,e33],3) \\n\\n')\n\n\nif len([_ for _ in model.children()]) == 2:\n outfp.write('class SqueezeNet:\\n')\n out_s += ' '\n outfp.write(out_s + 'def __init__(self):\\n')\n \n for idx,c in enumerate(model.children()):\n out_s = out_s + ' '\n\n if idx is 0:\n outfp.write(out_s+\"self.image = 
tf.placeholder('float',shape=[None,None,None,3],name='input_image')\\n\")\n outfp.write(out_s+\"self.layers = []\\n\")\n\n outfp.write(out_s+'x = self.image\\n')\n outfp.write(out_s+\"with tf.variable_scope('features'):\\n\")\n with tf.variable_scope('features'):\n out_s = out_s + ' '\n features = type_lookups[c.__class__](c,inp=input_image)\n out_s = out_s[:-4]\n\n outfp.write(out_s+'self.features = x\\n')\n\n elif idx is 1:\n outfp.write(out_s+\"with tf.variable_scope('classifier'):\\n\")\n with tf.variable_scope('classifier'):\n out_s = out_s + ' '\n classifier = type_lookups[c.__class__](c,inp=features)\n #classifier = tf.reshape(classifier,[-1,1000])\n classifier = tf.reshape(classifier,[-1,157])\n out_s = out_s[:-4]\n\n #outfp.write(out_s+'self.classifier = tf.reshape(x,[-1,1000])\\n')\n outfp.write(out_s+'self.classifier = tf.reshape(x,[-1,157])\\n')\n outfp.write('\\n\\n')\n out_s = out_s[:-4]\n\n\nelse:\n x = input_image\n for idx,c in enumerate(model.children()):\n x = type_lookups[c.__class__](c,inp=x)\noutfp.close()\n\n\nprint(classifier.get_shape(),classifier.name,input_image.name,features.name)\n\n\n\nfrom PIL import Image\nfrom scipy.misc import imresize\nimport os\n\nwith open('labels.txt') as fp:\n labels = [c[:-2].split(':')[1] for c in fp.readlines()]\ndef get_img(filename):\n vec = np.array(Image.open(filename))\n vec = imresize(vec,(224,224)).astype(np.float32)/255.0\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n vec = (vec-mean)/std\n return vec\n \nimg_dir = '.'\nimg_names = [x for x in os.listdir(img_dir) if 'jpeg' in x.lower()]\nimgs = [get_img(os.path.join(img_dir,x)) for x in img_names]\n\nsaver = tf.train.Saver()\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nscores = sess.run(classifier,feed_dict={input_image:np.array(imgs).reshape([-1,224,224,3])})\nfor idx,s in enumerate(np.argmax(scores,1)):\n print(img_names[idx],labels[s])\n\n\n\n\nsaver.save(sess, 'squeezenet.ckpt')\n\n\nfrom torch.autograd import Variable\ninput_data = torch.FloatTensor(np.transpose(np.array(imgs),[0,3,1,2]))\nmodel.eval()\npyt_scores = model(Variable(input_data))\nscores_ref = pyt_scores.data.numpy()\n\n\ndef rel_error(x, y):\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\nprint(rel_error(scores,scores_ref))\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "torch.load", "tensorflow.nn.max_pool", "torch.autograd.Variable", "tensorflow.nn.conv2d", "tensorflow.reset_default_graph", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver", "torch.nn.Sequential", "torch.nn.Conv2d", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "torch.nn.Linear", "tensorflow.nn.avg_pool", "numpy.transpose", "numpy.array", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "scipy.misc.imresize", "numpy.abs", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
Jmq14/VQA
[ "109a426eba8384c8e624f263ff6f52591dfc9153" ]
[ "student_code/coattention_net.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\nN: batch size\nS: length of sequence (26)\n\"\"\"\n\n\nclass AttentionNet(nn.Module):\n def __init__(self, n_feat, n_hidden):\n super().__init__()\n\n self.feature_head = nn.Linear(n_feat, n_hidden)\n self.attention_head = nn.Linear(n_feat, n_hidden)\n self.activate = nn.Sequential(\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n\n self.predict = nn.Sequential(\n nn.Linear(n_hidden, 1),\n nn.Softmax(dim=1)\n )\n\n def forward(self, X, g=None):\n feat = self.feature_head(X)\n if g is not None:\n feat += self.attention_head(g)\n\n feat = self.activate(feat)\n w = self.predict(feat)\n att_x = torch.sum(w * X, dim=1, keepdim=True)\n\n return att_x # N x 1 x n_feat\n\n\nclass CoattentionNet(nn.Module):\n \"\"\"\n Predicts an answer to a question about an image using the Hierarchical Question-Image Co-Attention\n for Visual Question Answering (Lu et al, 2017) paper.\n \"\"\"\n def __init__(self, n_img, n_ques, n_ans, n_emb):\n super().__init__()\n self.word_level = nn.Sequential(\n nn.Linear(n_ques, n_emb),\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n\n self.phase_level_1 = nn.Conv1d(n_emb, n_emb, kernel_size=1, padding=0)\n self.phase_level_2 = nn.Conv1d(n_emb, n_emb, kernel_size=2, padding=0)\n self.phase_level_3 = nn.Conv1d(n_emb, n_emb, kernel_size=3, padding=1)\n self.phase_level_activate = nn.Sequential(\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n\n self.ques_level = nn.LSTM(input_size=n_emb, hidden_size=n_emb, batch_first=True)\n\n self.image_encoder = nn.Sequential(\n nn.Linear(n_img, n_emb),\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n\n self.question_attention = AttentionNet(n_emb, n_hidden=512)\n self.image_attention = AttentionNet(n_emb, n_hidden=512)\n\n self.word_level_fuse = nn.Sequential(\n nn.Linear(n_emb, n_emb),\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n self.phrase_level_fuse = nn.Sequential(\n nn.Linear(n_emb * 2, n_emb),\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n self.ques_level_fuse = nn.Sequential(\n nn.Linear(n_emb * 2, n_emb * 2),\n nn.Tanh(),\n nn.Dropout(p=0.5)\n )\n\n self.predict = nn.Linear(n_emb * 2, n_ans)\n # softmax is implemented in loss function\n\n def _alternating_co_attention(self, Q, V):\n s = self.question_attention(Q, None)\n v = self.image_attention(V, s)\n q = self.question_attention(Q, v)\n return v.squeeze(), q.squeeze() # N x n_feat\n\n def forward(self, image, question_encoding):\n\n # ============= Question Hierarchy =============\n # word level\n word_feat = self.word_level(question_encoding) # N x S x n_emb\n\n # phase level\n word_feat_T = word_feat.permute(0, 2, 1) # N x n_emb x S\n phase_feat_1 = self.phase_level_1(word_feat_T)\n phase_feat_2 = self.phase_level_2(F.pad(word_feat_T, (0, 1)))\n phase_feat_3 = self.phase_level_3(word_feat_T)\n phase_feat_T = torch.max(torch.cat([\n phase_feat_1[:, :, :, None],\n phase_feat_2[:, :, :, None],\n phase_feat_3[:, :, :, None]], 3), 3, keepdim=False)[0]\n phase_feat = phase_feat_T.permute(0, 2, 1) # N x S x n_emb\n # print(phase_feat_1.shape, phase_feat_2.shape, phase_feat_3.shape, phase_feat.shape)\n phase_feat = self.phase_level_activate(phase_feat)\n\n # question level\n # print(phase_feat.shape)\n ques_feat, _ = self.ques_level(phase_feat) # N x S x n_emb\n\n # ============= Alter Attention =============\n img_embed = self.image_encoder(image)\n\n v_w, q_w = self._alternating_co_attention(word_feat, img_embed)\n v_p, q_p = self._alternating_co_attention(phase_feat, img_embed)\n v_s, q_s = self._alternating_co_attention(ques_feat, img_embed)\n\n # -- 
fuse\n word_att_feat = self.word_level_fuse(q_w + v_w)\n\n phase_att_feat = torch.cat([q_p + v_p, word_att_feat], dim=1)\n phrase_att_feat = self.phrase_level_fuse(phase_att_feat)\n\n ques_att_feat = torch.cat([q_s + v_s, phrase_att_feat], dim=1)\n ques_att_feat = self.ques_level_fuse(ques_att_feat)\n\n output = self.predict(ques_att_feat)\n\n return output\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.LSTM", "torch.cat", "torch.sum", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]